pnfs_nfs.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986
  1. /*
  2. * Common NFS I/O operations for the pnfs file based
  3. * layout drivers.
  4. *
  5. * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
  6. *
  7. * Tom Haynes <loghyr@primarydata.com>
  8. */
  9. #include <linux/nfs_fs.h>
  10. #include <linux/nfs_page.h>
  11. #include <linux/sunrpc/addr.h>
  12. #include <linux/module.h>
  13. #include "nfs4session.h"
  14. #include "internal.h"
  15. #include "pnfs.h"
  16. #define NFSDBG_FACILITY NFSDBG_PNFS
  17. void pnfs_generic_rw_release(void *data)
  18. {
  19. struct nfs_pgio_header *hdr = data;
  20. nfs_put_client(hdr->ds_clp);
  21. hdr->mds_ops->rpc_release(data);
  22. }
  23. EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);
/* Fake up some data that will cause nfs_commit_release to retry the writes. */
void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
{
	struct nfs_page *first = nfs_list_entry(data->pages.next);

	/* Pretend the commit RPC itself succeeded... */
	data->task.tk_status = 0;
	/* ...but copy the first request's write verifier and corrupt it,
	 * so the verifier comparison in the release path fails and the
	 * writes are resent. */
	memcpy(&data->verf.verifier, &first->wb_verf,
	       sizeof(data->verf.verifier));
	data->verf.verifier.data[0]++; /* ensure verifier mismatch */
}
EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);
/*
 * rpc_call_done callback for a pNFS COMMIT sent to a data server:
 * forward completion to the MDS commit handler.
 */
void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	/* Note this may cause RPC to be resent */
	wdata->mds_ops->rpc_call_done(task, data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);
/*
 * Release callback for a pNFS COMMIT: run the completion handler first,
 * then drop the references taken for the call (layout segment and
 * data-server client) before freeing the commit data itself.
 */
void pnfs_generic_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	pnfs_put_lseg(data->lseg);
	nfs_put_client(data->ds_clp);
	nfs_commitdata_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);
/* The generic layer is about to remove the req from the commit list.
 * If this will make the bucket empty, it will need to put the lseg reference.
 * Note this must be called holding i_lock
 */
void
pnfs_generic_clear_request_commit(struct nfs_page *req,
				  struct nfs_commit_info *cinfo)
{
	struct pnfs_layout_segment *freeme = NULL;

	if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
		goto out;
	cinfo->ds->nwritten--;
	if (list_is_singular(&req->wb_list)) {
		struct pnfs_commit_bucket *bucket;

		/* req is the only entry on the bucket's "written" list, so
		 * req->wb_list.next is the list head embedded in the bucket;
		 * container_of (via list_first_entry) recovers the bucket.
		 * Removing the last req must also drop the bucket's lseg ref. */
		bucket = list_first_entry(&req->wb_list,
					  struct pnfs_commit_bucket,
					  written);
		freeme = bucket->wlseg;
		bucket->wlseg = NULL;
	}
out:
	nfs_request_remove_commit_list(req, cinfo);
	pnfs_put_lseg_locked(freeme);
}
EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
/*
 * Move up to @max lockable requests from @src to @dst, removing them
 * from the commit accounting and taking a kref on each.  Returns the
 * number of requests moved.
 *
 * Called with cinfo->inode->i_lock held.  cond_resched_lock() may drop
 * and retake the lock; when it does, the saved "next" pointer must be
 * revalidated via list_safe_reset_next().  The @max cutoff is ignored
 * for O_DIRECT commits (cinfo->dreq set), which always drain everything.
 */
static int
pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst,
				  struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		/* skip requests another task has locked */
		if (!nfs_lock_request(req))
			continue;
		kref_get(&req->wb_kref);
		if (cond_resched_lock(&cinfo->inode->i_lock))
			list_safe_reset_next(req, tmp, wb_list);
		nfs_request_remove_commit_list(req, cinfo);
		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
	}
	return ret;
}
/*
 * Move up to @max requests from one bucket's "written" list to its
 * "committing" list and update the ds counters accordingly.  The first
 * transfer also propagates the bucket's write lseg reference to the
 * commit side (clseg); once the written list is drained, the wlseg
 * reference is dropped.
 *
 * Caller must hold cinfo->inode->i_lock.  Returns the number moved.
 */
static int
pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
				 struct nfs_commit_info *cinfo,
				 int max)
{
	struct list_head *src = &bucket->written;
	struct list_head *dst = &bucket->committing;
	int ret;

	lockdep_assert_held(&cinfo->inode->i_lock);
	ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max);
	if (ret) {
		cinfo->ds->nwritten -= ret;
		cinfo->ds->ncommitting += ret;
		if (bucket->clseg == NULL)
			bucket->clseg = pnfs_get_lseg(bucket->wlseg);
		if (list_empty(src)) {
			/* written list drained: release its lseg ref */
			pnfs_put_lseg_locked(bucket->wlseg);
			bucket->wlseg = NULL;
		}
	}
	return ret;
}
  118. /* Move reqs from written to committing lists, returning count
  119. * of number moved.
  120. */
  121. int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo,
  122. int max)
  123. {
  124. int i, rv = 0, cnt;
  125. lockdep_assert_held(&cinfo->inode->i_lock);
  126. for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
  127. cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
  128. cinfo, max);
  129. max -= cnt;
  130. rv += cnt;
  131. }
  132. return rv;
  133. }
  134. EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);
/* Pull everything off the committing lists and dump into @dst. */
void pnfs_generic_recover_commit_reqs(struct list_head *dst,
				      struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *b;
	struct pnfs_layout_segment *freeme;
	int i;

	lockdep_assert_held(&cinfo->inode->i_lock);
restart:
	/* max == 0 disables the transfer limit: ret can never equal 0
	 * after the increment, so each bucket is fully drained */
	for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
		if (pnfs_generic_transfer_commit_list(&b->written, dst,
						      cinfo, 0)) {
			/* pnfs_put_lseg() may sleep, so drop i_lock while
			 * releasing the bucket's lseg ref; the bucket state
			 * may have changed meanwhile, so rescan from the
			 * start once the lock is retaken. */
			freeme = b->wlseg;
			b->wlseg = NULL;
			spin_unlock(&cinfo->inode->i_lock);
			pnfs_put_lseg(freeme);
			spin_lock(&cinfo->inode->i_lock);
			goto restart;
		}
	}
	cinfo->ds->nwritten = 0;
}
EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);
/*
 * Requeue every request still sitting on a committing list at or after
 * bucket @idx back for a later commit attempt, dropping each bucket's
 * commit lseg reference.  Used to clean up when commit setup fails
 * part-way through (e.g. nfs_commitdata_alloc() failure).
 */
static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct pnfs_commit_bucket *bucket;
	struct pnfs_layout_segment *freeme;
	LIST_HEAD(pages);
	int i;

	spin_lock(&cinfo->inode->i_lock);
	for (i = idx; i < fl_cinfo->nbuckets; i++) {
		bucket = &fl_cinfo->buckets[i];
		if (list_empty(&bucket->committing))
			continue;
		freeme = bucket->clseg;
		bucket->clseg = NULL;
		list_splice_init(&bucket->committing, &pages);
		/* nfs_retry_commit()/pnfs_put_lseg() may sleep: drop i_lock */
		spin_unlock(&cinfo->inode->i_lock);
		nfs_retry_commit(&pages, freeme, cinfo, i);
		pnfs_put_lseg(freeme);
		spin_lock(&cinfo->inode->i_lock);
	}
	spin_unlock(&cinfo->inode->i_lock);
}
/*
 * Allocate one nfs_commit_data per bucket that has requests queued on
 * its committing list, tagging each with its bucket index and chaining
 * it on @list.  Returns the number allocated.
 *
 * If an allocation fails mid-way, the loop breaks and buckets from the
 * failing index onward are handed to pnfs_generic_retry_commit() so
 * their requests are requeued instead of being lost.  (When all
 * allocations succeed, i == nbuckets and the retry call is a no-op.)
 */
static unsigned int
pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
			      struct list_head *list)
{
	struct pnfs_ds_commit_info *fl_cinfo;
	struct pnfs_commit_bucket *bucket;
	struct nfs_commit_data *data;
	int i;
	unsigned int nreq = 0;

	fl_cinfo = cinfo->ds;
	bucket = fl_cinfo->buckets;
	for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
		if (list_empty(&bucket->committing))
			continue;
		data = nfs_commitdata_alloc();
		if (!data)
			break;
		data->ds_commit_index = i;
		list_add(&data->pages, list);
		nreq++;
	}

	/* Clean up on error */
	pnfs_generic_retry_commit(cinfo, i);
	return nreq;
}
  205. static inline
  206. void pnfs_fetch_commit_bucket_list(struct list_head *pages,
  207. struct nfs_commit_data *data,
  208. struct nfs_commit_info *cinfo)
  209. {
  210. struct pnfs_commit_bucket *bucket;
  211. bucket = &cinfo->ds->buckets[data->ds_commit_index];
  212. spin_lock(&cinfo->inode->i_lock);
  213. list_splice_init(&bucket->committing, pages);
  214. data->lseg = bucket->clseg;
  215. bucket->clseg = NULL;
  216. spin_unlock(&cinfo->inode->i_lock);
  217. }
/* Helper function for pnfs_generic_commit_pagelist to catch an empty
 * page list. This can happen when two commits race.
 *
 * This must be called instead of nfs_init_commit - call one or the other, but
 * not both!
 */
static bool
pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
					  struct nfs_commit_data *data,
					  struct nfs_commit_info *cinfo)
{
	if (list_empty(pages)) {
		/* Balance the rpcs_out count taken for this commit and
		 * wake anyone waiting for outstanding commits to drain. */
		if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
			wake_up_atomic_t(&cinfo->mds->rpcs_out);
		/* don't call nfs_commitdata_release - it tries to put
		 * the open_context which is not acquired until nfs_init_commit
		 * which has not been called on @data */
		WARN_ON_ONCE(data->context);
		nfs_commit_free(data);
		return true;
	}

	return false;
}
/* This follows nfs_commit_list pretty closely */
int
pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			     int how, struct nfs_commit_info *cinfo,
			     int (*initiate_commit)(struct nfs_commit_data *data,
						    int how))
{
	struct nfs_commit_data *data, *tmp;
	LIST_HEAD(list);
	unsigned int nreq = 0;

	/* One commit for pages going through the MDS, flagged with
	 * ds_commit_index == -1 ... */
	if (!list_empty(mds_pages)) {
		data = nfs_commitdata_alloc();
		if (data != NULL) {
			data->ds_commit_index = -1;
			list_add(&data->pages, &list);
			nreq++;
		} else {
			/* requeue both the MDS pages and every DS bucket */
			nfs_retry_commit(mds_pages, NULL, cinfo, 0);
			pnfs_generic_retry_commit(cinfo, 0);
			return -ENOMEM;
		}
	}

	/* ... plus one commit per data-server bucket with pending reqs */
	nreq += pnfs_generic_alloc_ds_commits(cinfo, &list);

	if (nreq == 0)
		goto out;

	atomic_add(nreq, &cinfo->mds->rpcs_out);

	list_for_each_entry_safe(data, tmp, &list, pages) {
		list_del_init(&data->pages);
		if (data->ds_commit_index < 0) {
			/* another commit raced with us */
			if (pnfs_generic_commit_cancel_empty_pagelist(mds_pages,
								      data, cinfo))
				continue;
			nfs_init_commit(data, mds_pages, NULL, cinfo);
			nfs_initiate_commit(NFS_CLIENT(inode), data,
					    NFS_PROTO(data->inode),
					    data->mds_ops, how, 0);
		} else {
			LIST_HEAD(pages);

			pnfs_fetch_commit_bucket_list(&pages, data, cinfo);

			/* another commit raced with us */
			if (pnfs_generic_commit_cancel_empty_pagelist(&pages,
								      data, cinfo))
				continue;
			nfs_init_commit(data, &pages, data->lseg, cinfo);
			initiate_commit(data, how);
		}
	}
out:
	cinfo->ds->ncommitting = 0;
	return PNFS_ATTEMPTED;
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);
  294. /*
  295. * Data server cache
  296. *
  297. * Data servers can be mapped to different device ids.
  298. * nfs4_pnfs_ds reference counting
  299. * - set to 1 on allocation
  300. * - incremented when a device id maps a data server already in the cache.
  301. * - decremented when deviceid is removed from the cache.
  302. */
  303. static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
  304. static LIST_HEAD(nfs4_data_server_cache);
  305. /* Debug routines */
  306. static void
  307. print_ds(struct nfs4_pnfs_ds *ds)
  308. {
  309. if (ds == NULL) {
  310. printk(KERN_WARNING "%s NULL device\n", __func__);
  311. return;
  312. }
  313. printk(KERN_WARNING " ds %s\n"
  314. " ref count %d\n"
  315. " client %p\n"
  316. " cl_exchange_flags %x\n",
  317. ds->ds_remotestr,
  318. atomic_read(&ds->ds_count), ds->ds_clp,
  319. ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
  320. }
  321. static bool
  322. same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
  323. {
  324. struct sockaddr_in *a, *b;
  325. struct sockaddr_in6 *a6, *b6;
  326. if (addr1->sa_family != addr2->sa_family)
  327. return false;
  328. switch (addr1->sa_family) {
  329. case AF_INET:
  330. a = (struct sockaddr_in *)addr1;
  331. b = (struct sockaddr_in *)addr2;
  332. if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
  333. a->sin_port == b->sin_port)
  334. return true;
  335. break;
  336. case AF_INET6:
  337. a6 = (struct sockaddr_in6 *)addr1;
  338. b6 = (struct sockaddr_in6 *)addr2;
  339. /* LINKLOCAL addresses must have matching scope_id */
  340. if (ipv6_addr_src_scope(&a6->sin6_addr) ==
  341. IPV6_ADDR_SCOPE_LINKLOCAL &&
  342. a6->sin6_scope_id != b6->sin6_scope_id)
  343. return false;
  344. if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
  345. a6->sin6_port == b6->sin6_port)
  346. return true;
  347. break;
  348. default:
  349. dprintk("%s: unhandled address family: %u\n",
  350. __func__, addr1->sa_family);
  351. return false;
  352. }
  353. return false;
  354. }
  355. /*
  356. * Checks if 'dsaddrs1' contains a subset of 'dsaddrs2'. If it does,
  357. * declare a match.
  358. */
  359. static bool
  360. _same_data_server_addrs_locked(const struct list_head *dsaddrs1,
  361. const struct list_head *dsaddrs2)
  362. {
  363. struct nfs4_pnfs_ds_addr *da1, *da2;
  364. struct sockaddr *sa1, *sa2;
  365. bool match = false;
  366. list_for_each_entry(da1, dsaddrs1, da_node) {
  367. sa1 = (struct sockaddr *)&da1->da_addr;
  368. match = false;
  369. list_for_each_entry(da2, dsaddrs2, da_node) {
  370. sa2 = (struct sockaddr *)&da2->da_addr;
  371. match = same_sockaddr(sa1, sa2);
  372. if (match)
  373. break;
  374. }
  375. if (!match)
  376. break;
  377. }
  378. return match;
  379. }
  380. /*
  381. * Lookup DS by addresses. nfs4_ds_cache_lock is held
  382. */
  383. static struct nfs4_pnfs_ds *
  384. _data_server_lookup_locked(const struct list_head *dsaddrs)
  385. {
  386. struct nfs4_pnfs_ds *ds;
  387. list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
  388. if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
  389. return ds;
  390. return NULL;
  391. }
/*
 * Free a data server entry once its last reference is gone: put the
 * nfs_client, free every queued multipath address, then the entry.
 */
static void destroy_ds(struct nfs4_pnfs_ds *ds)
{
	struct nfs4_pnfs_ds_addr *da;

	dprintk("--> %s\n", __func__);
	ifdebug(FACILITY)
		print_ds(ds);

	nfs_put_client(ds->ds_clp);
	while (!list_empty(&ds->ds_addrs)) {
		da = list_first_entry(&ds->ds_addrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	kfree(ds->ds_remotestr);
	kfree(ds);
}
/*
 * Drop a reference on @ds.  The final put also takes
 * nfs4_ds_cache_lock (atomically with the decrement) to unhash the
 * entry from the global cache before freeing it.
 */
void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
{
	if (atomic_dec_and_lock(&ds->ds_count,
				&nfs4_ds_cache_lock)) {
		list_del_init(&ds->ds_node);
		spin_unlock(&nfs4_ds_cache_lock);
		destroy_ds(ds);
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);
/*
 * Create a string with a human readable address and port to avoid
 * complicated setup around many dprinks.
 *
 * Returns a kzalloc'ed string of the form "{addr1,addr2,}" (each
 * address is comma-terminated), or NULL on allocation failure or if
 * the length bookkeeping is ever violated.  Caller frees.
 */
static char *
nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da;
	char *remotestr;
	size_t len;
	char *p;

	len = 3;        /* '{', '}' and eol */
	list_for_each_entry(da, dsaddrs, da_node) {
		len += strlen(da->da_remotestr) + 1;	/* string plus comma */
	}

	remotestr = kzalloc(len, gfp_flags);
	if (!remotestr)
		return NULL;

	p = remotestr;
	*(p++) = '{';
	len--;
	/* len tracks the space remaining; every check below guards
	 * against overrunning the allocation rather than truncating */
	list_for_each_entry(da, dsaddrs, da_node) {
		size_t ll = strlen(da->da_remotestr);

		if (ll > len)
			goto out_err;

		memcpy(p, da->da_remotestr, ll);
		p += ll;
		len -= ll;

		if (len < 1)
			goto out_err;

		(*p++) = ',';
		len--;
	}
	if (len < 2)
		goto out_err;
	*(p++) = '}';
	*p = '\0';
	return remotestr;

out_err:
	kfree(remotestr);
	return NULL;
}
/*
 * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to ds cache if
 * uncached and return cached struct nfs4_pnfs_ds.
 */
struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
	char *remotestr;

	if (list_empty(dsaddrs)) {
		dprintk("%s: no addresses defined\n", __func__);
		goto out;
	}

	/* allocate before taking the cache lock; freed if we lose the race */
	ds = kzalloc(sizeof(*ds), gfp_flags);
	if (!ds)
		goto out;

	/* this is only used for debugging, so it's ok if its NULL */
	remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);

	spin_lock(&nfs4_ds_cache_lock);
	tmp_ds = _data_server_lookup_locked(dsaddrs);
	if (tmp_ds == NULL) {
		/* not cached: take ownership of the caller's address list
		 * and insert the new entry with an initial ref of 1 */
		INIT_LIST_HEAD(&ds->ds_addrs);
		list_splice_init(dsaddrs, &ds->ds_addrs);
		ds->ds_remotestr = remotestr;
		atomic_set(&ds->ds_count, 1);
		INIT_LIST_HEAD(&ds->ds_node);
		ds->ds_clp = NULL;
		list_add(&ds->ds_node, &nfs4_data_server_cache);
		dprintk("%s add new data server %s\n", __func__,
			ds->ds_remotestr);
	} else {
		/* already cached: discard our copy, bump the existing ref */
		kfree(remotestr);
		kfree(ds);
		atomic_inc(&tmp_ds->ds_count);
		dprintk("%s data server %s found, inc'ed ds_count to %d\n",
			__func__, tmp_ds->ds_remotestr,
			atomic_read(&tmp_ds->ds_count));
		ds = tmp_ds;
	}
	spin_unlock(&nfs4_ds_cache_lock);
out:
	return ds;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);
/* Sleep (killable) until another task clears NFS4DS_CONNECTING on @ds. */
static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
{
	might_sleep();
	wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
		    TASK_KILLABLE);
}
/*
 * Clear NFS4DS_CONNECTING and wake any waiters in
 * nfs4_wait_ds_connect().  The barriers around clear_bit() order the
 * bit update against the connection state published before the wakeup.
 */
static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
{
	smp_mb__before_atomic();
	clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
	smp_mb__after_atomic();
	wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
}
  519. static struct nfs_client *(*get_v3_ds_connect)(
  520. struct nfs_server *mds_srv,
  521. const struct sockaddr *ds_addr,
  522. int ds_addrlen,
  523. int ds_proto,
  524. unsigned int ds_timeo,
  525. unsigned int ds_retrans,
  526. rpc_authflavor_t au_flavor);
  527. static bool load_v3_ds_connect(void)
  528. {
  529. if (!get_v3_ds_connect) {
  530. get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
  531. WARN_ON_ONCE(!get_v3_ds_connect);
  532. }
  533. return(get_v3_ds_connect != NULL);
  534. }
/* Release the NFSv3 module symbol pinned by load_v3_ds_connect(). */
void nfs4_pnfs_v3_ds_connect_unload(void)
{
	if (get_v3_ds_connect) {
		symbol_put(nfs3_set_ds_client);
		get_v3_ds_connect = NULL;
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_v3_ds_connect_unload);
  543. static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
  544. struct nfs4_pnfs_ds *ds,
  545. unsigned int timeo,
  546. unsigned int retrans,
  547. rpc_authflavor_t au_flavor)
  548. {
  549. struct nfs_client *clp = ERR_PTR(-EIO);
  550. struct nfs4_pnfs_ds_addr *da;
  551. int status = 0;
  552. dprintk("--> %s DS %s au_flavor %d\n", __func__,
  553. ds->ds_remotestr, au_flavor);
  554. if (!load_v3_ds_connect())
  555. goto out;
  556. list_for_each_entry(da, &ds->ds_addrs, da_node) {
  557. dprintk("%s: DS %s: trying address %s\n",
  558. __func__, ds->ds_remotestr, da->da_remotestr);
  559. if (!IS_ERR(clp)) {
  560. struct xprt_create xprt_args = {
  561. .ident = XPRT_TRANSPORT_TCP,
  562. .net = clp->cl_net,
  563. .dstaddr = (struct sockaddr *)&da->da_addr,
  564. .addrlen = da->da_addrlen,
  565. .servername = clp->cl_hostname,
  566. };
  567. /* Add this address as an alias */
  568. rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
  569. rpc_clnt_test_and_add_xprt, NULL);
  570. } else
  571. clp = get_v3_ds_connect(mds_srv,
  572. (struct sockaddr *)&da->da_addr,
  573. da->da_addrlen, IPPROTO_TCP,
  574. timeo, retrans, au_flavor);
  575. }
  576. if (IS_ERR(clp)) {
  577. status = PTR_ERR(clp);
  578. goto out;
  579. }
  580. smp_wmb();
  581. ds->ds_clp = clp;
  582. dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
  583. out:
  584. return status;
  585. }
/*
 * Connect @ds to its NFSv4.x data server(s).  The first address that
 * connects creates an nfs_client plus DS session; if that client
 * supports session trunking, each further multipath address is tested
 * for trunking and added as a transport alias.  On success ds->ds_clp
 * is published after a write barrier.
 *
 * Returns 0 on success or a negative errno.
 */
static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
				    struct nfs4_pnfs_ds *ds,
				    unsigned int timeo,
				    unsigned int retrans,
				    u32 minor_version,
				    rpc_authflavor_t au_flavor)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	int status = 0;

	dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
		au_flavor);

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		if (!IS_ERR(clp) && clp->cl_mvops->session_trunk) {
			struct xprt_create xprt_args = {
				.ident = XPRT_TRANSPORT_TCP,
				.net = clp->cl_net,
				.dstaddr = (struct sockaddr *)&da->da_addr,
				.addrlen = da->da_addrlen,
				.servername = clp->cl_hostname,
			};
			struct nfs4_add_xprt_data xprtdata = {
				.clp = clp,
				.cred = nfs4_get_clid_cred(clp),
			};
			struct rpc_add_xprt_test rpcdata = {
				.add_xprt_test = clp->cl_mvops->session_trunk,
				.data = &xprtdata,
			};

			/**
			* Test this address for session trunking and
			* add as an alias
			*/
			rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
					  rpc_clnt_setup_test_and_add_xprt,
					  &rpcdata);
			if (xprtdata.cred)
				put_rpccred(xprtdata.cred);
		} else {
			clp = nfs4_set_ds_client(mds_srv,
						(struct sockaddr *)&da->da_addr,
						da->da_addrlen, IPPROTO_TCP,
						timeo, retrans, minor_version,
						au_flavor);
			if (IS_ERR(clp))
				continue;

			status = nfs4_init_ds_session(clp,
					mds_srv->nfs_client->cl_lease_time);
			if (status) {
				/* session setup failed: drop this client and
				 * fall back to trying the next address */
				nfs_put_client(clp);
				clp = ERR_PTR(-EIO);
				continue;
			}

		}
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	smp_wmb();
	ds->ds_clp = clp;
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
}
/*
 * Create an rpc connection to the nfs4_pnfs_ds data server.
 * Currently only supports IPv4 and IPv6 addresses.
 * If connection fails, make devid unavailable.
 */
void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
			  struct nfs4_deviceid_node *devid, unsigned int timeo,
			  unsigned int retrans, u32 version,
			  u32 minor_version, rpc_authflavor_t au_flavor)
{
	/* Only one task performs the connect; everyone else blocks in
	 * nfs4_wait_ds_connect() until the CONNECTING bit is cleared. */
	if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
		int err = 0;

		if (version == 3) {
			err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
						       retrans, au_flavor);
		} else if (version == 4) {
			err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
						       retrans, minor_version,
						       au_flavor);
		} else {
			dprintk("%s: unsupported DS version %d\n", __func__,
				version);
			err = -EPROTONOSUPPORT;
		}

		if (err)
			nfs4_mark_deviceid_unavailable(devid);
		nfs4_clear_ds_conn_bit(ds);
	} else {
		nfs4_wait_ds_connect(ds);
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);
/*
 * Currently only supports ipv4, ipv6 and one multi-path address.
 *
 * Decode one XDR-encoded netaddr4 (r_netid + r_addr, RFC 5665 "uaddr"
 * form "a.b.c.d.P1.P2") into a freshly allocated nfs4_pnfs_ds_addr.
 * Returns NULL on any decode, parse or allocation failure; caller
 * frees the returned entry.
 */
struct nfs4_pnfs_ds_addr *
nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da = NULL;
	char *buf, *portstr;
	__be16 port;
	int nlen, rlen;
	int tmp[2];
	__be32 *p;
	char *netid, *match_netid;
	size_t len, match_netid_len;
	char *startsep = "";
	char *endsep = "";

	/* r_netid */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_err;
	nlen = be32_to_cpup(p++);

	p = xdr_inline_decode(xdr, nlen);
	if (unlikely(!p))
		goto out_err;

	netid = kmalloc(nlen+1, gfp_flags);
	if (unlikely(!netid))
		goto out_err;

	netid[nlen] = '\0';
	memcpy(netid, p, nlen);

	/* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_free_netid;
	rlen = be32_to_cpup(p);

	p = xdr_inline_decode(xdr, rlen);
	if (unlikely(!p))
		goto out_free_netid;

	/* port is ".ABC.DEF", 8 chars max */
	if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
		dprintk("%s: Invalid address, length %d\n", __func__,
			rlen);
		goto out_free_netid;
	}
	buf = kmalloc(rlen + 1, gfp_flags);
	if (!buf) {
		dprintk("%s: Not enough memory\n", __func__);
		goto out_free_netid;
	}
	buf[rlen] = '\0';
	memcpy(buf, p, rlen);

	/* replace port '.' with '-' */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot in port\n",
			__func__);
		goto out_free_buf;
	}
	*portstr = '-';

	/* find '.' between address and port */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot between address and "
			"port\n", __func__);
		goto out_free_buf;
	}
	/* buf is now just the address; portstr+1 is "P1-P2" */
	*portstr = '\0';

	da = kzalloc(sizeof(*da), gfp_flags);
	if (unlikely(!da))
		goto out_free_buf;

	INIT_LIST_HEAD(&da->da_node);

	if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
		      sizeof(da->da_addr))) {
		dprintk("%s: error parsing address %s\n", __func__, buf);
		goto out_free_da;
	}

	/* reassemble the port from its two decimal octets */
	portstr++;
	sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
	port = htons((tmp[0] << 8) | (tmp[1]));

	switch (da->da_addr.ss_family) {
	case AF_INET:
		((struct sockaddr_in *)&da->da_addr)->sin_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in);
		match_netid = "tcp";
		match_netid_len = 3;
		break;

	case AF_INET6:
		((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in6);
		match_netid = "tcp6";
		match_netid_len = 4;
		startsep = "[";
		endsep = "]";
		break;

	default:
		dprintk("%s: unsupported address family: %u\n",
			__func__, da->da_addr.ss_family);
		goto out_free_da;
	}

	/* the netid must agree with the address family we parsed */
	if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
		dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
			__func__, netid, match_netid);
		goto out_free_da;
	}

	/* save human readable address */
	len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
	da->da_remotestr = kzalloc(len, gfp_flags);

	/* NULL is ok, only used for dprintk */
	if (da->da_remotestr)
		snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
			 buf, endsep, ntohs(port));

	dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
	kfree(buf);
	kfree(netid);
	return da;

out_free_da:
	kfree(da);
out_free_buf:
	dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
	kfree(buf);
out_free_netid:
	kfree(netid);
out_err:
	return NULL;
}
EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
/*
 * Queue @req on the "written" list of the commit bucket selected by
 * @ds_commit_idx.  The first request into an empty bucket pins @lseg
 * for that bucket; if the layout segment is no longer valid, the write
 * is rescheduled through the MDS instead.
 */
void
pnfs_layout_mark_request_commit(struct nfs_page *req,
				struct pnfs_layout_segment *lseg,
				struct nfs_commit_info *cinfo,
				u32 ds_commit_idx)
{
	struct list_head *list;
	struct pnfs_commit_bucket *buckets;

	spin_lock(&cinfo->inode->i_lock);
	buckets = cinfo->ds->buckets;
	list = &buckets[ds_commit_idx].written;
	if (list_empty(list)) {
		if (!pnfs_is_valid_lseg(lseg)) {
			/* stale layout: push this write back to the MDS */
			spin_unlock(&cinfo->inode->i_lock);
			cinfo->completion_ops->resched_write(cinfo, req);
			return;
		}
		/* Non-empty buckets hold a reference on the lseg. That ref
		 * is normally transferred to the COMMIT call and released
		 * there. It could also be released if the last req is pulled
		 * off due to a rewrite, in which case it will be done in
		 * pnfs_common_clear_request_commit
		 */
		WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
		buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
	}
	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
	cinfo->ds->nwritten++;

	nfs_request_add_commit_list_locked(req, list, cinfo);
	spin_unlock(&cinfo->inode->i_lock);
	nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
  843. int
  844. pnfs_nfs_generic_sync(struct inode *inode, bool datasync)
  845. {
  846. int ret;
  847. if (!pnfs_layoutcommit_outstanding(inode))
  848. return 0;
  849. ret = nfs_commit_inode(inode, FLUSH_SYNC);
  850. if (ret < 0)
  851. return ret;
  852. if (datasync)
  853. return 0;
  854. return pnfs_layoutcommit_inode(inode, true);
  855. }
  856. EXPORT_SYMBOL_GPL(pnfs_nfs_generic_sync);