pnfs_nfs.c

/*
 * Common NFS I/O operations for the pnfs file based
 * layout drivers.
 *
 * Copyright (c) 2014, Primary Data, Inc. All rights reserved.
 *
 * Tom Haynes <loghyr@primarydata.com>
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/addr.h>
#include <linux/module.h>

#include "nfs4session.h"
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
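
/*
 * Release the nfs_client reference taken on the data server for this I/O
 * and hand the rest of the cleanup back to the MDS rpc_release op.
 */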
void pnfs_generic_rw_release(void *data)
{
	struct nfs_pgio_header *hdr = data;

	nfs_put_client(hdr->ds_clp);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_rw_release);

/* Fake up some data that will cause nfs_commit_release to retry the writes. */
void pnfs_generic_prepare_to_resend_writes(struct nfs_commit_data *data)
{
	struct nfs_page *first = nfs_list_entry(data->pages.next);

	data->task.tk_status = 0;
	memcpy(&data->verf.verifier, &first->wb_verf,
	       sizeof(data->verf.verifier));
	data->verf.verifier.data[0]++; /* ensure verifier mismatch */
}
EXPORT_SYMBOL_GPL(pnfs_generic_prepare_to_resend_writes);

void pnfs_generic_write_commit_done(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	/* Note this may cause RPC to be resent */
	wdata->mds_ops->rpc_call_done(task, data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_write_commit_done);
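
/*
 * Run the MDS completion callback for this commit, then drop the layout
 * segment and data server client references before freeing the commit data.
 */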
void pnfs_generic_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	pnfs_put_lseg(data->lseg);
	nfs_put_client(data->ds_clp);
	nfs_commitdata_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_release);

/* The generic layer is about to remove the req from the commit list.
 * If this will make the bucket empty, it will need to put the lseg reference.
 * Note this must be called holding the inode (/cinfo) lock
 */
void
pnfs_generic_clear_request_commit(struct nfs_page *req,
				  struct nfs_commit_info *cinfo)
{
	struct pnfs_layout_segment *freeme = NULL;

	if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
		goto out;
	cinfo->ds->nwritten--;
	if (list_is_singular(&req->wb_list)) {
		struct pnfs_commit_bucket *bucket;

		bucket = list_first_entry(&req->wb_list,
					  struct pnfs_commit_bucket,
					  written);
		freeme = bucket->wlseg;
		bucket->wlseg = NULL;
	}
out:
	nfs_request_remove_commit_list(req, cinfo);
	pnfs_put_lseg_locked(freeme);
}
EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
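
/*
 * Move requests from @src to @dst, locking each one and clearing its
 * commit-to-DS state.  Stops after @max requests unless this is an
 * O_DIRECT commit.  Returns the number of requests moved.
 */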
static int
pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst,
				  struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		if (!nfs_lock_request(req))
			continue;
		kref_get(&req->wb_kref);
		if (cond_resched_lock(cinfo->lock))
			list_safe_reset_next(req, tmp, wb_list);
		nfs_request_remove_commit_list(req, cinfo);
		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
	}
	return ret;
}
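
/*
 * Move written requests in a single bucket onto its committing list and
 * carry the bucket's write lseg reference over to the commit side.
 */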
static int
pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
				 struct nfs_commit_info *cinfo,
				 int max)
{
	struct list_head *src = &bucket->written;
	struct list_head *dst = &bucket->committing;
	int ret;

	lockdep_assert_held(cinfo->lock);
	ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max);
	if (ret) {
		cinfo->ds->nwritten -= ret;
		cinfo->ds->ncommitting += ret;
		bucket->clseg = bucket->wlseg;
		if (list_empty(src))
			bucket->wlseg = NULL;
		else
			pnfs_get_lseg(bucket->clseg);
	}
	return ret;
}

/* Move reqs from written to committing lists, returning count
 * of number moved.
 */
int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo,
				   int max)
{
	int i, rv = 0, cnt;

	lockdep_assert_held(cinfo->lock);
	for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
		cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
						       cinfo, max);
		max -= cnt;
		rv += cnt;
	}
	return rv;
}
EXPORT_SYMBOL_GPL(pnfs_generic_scan_commit_lists);

/* Pull everything off the committing lists and dump into @dst. */
void pnfs_generic_recover_commit_reqs(struct list_head *dst,
				      struct nfs_commit_info *cinfo)
{
	struct pnfs_commit_bucket *b;
	struct pnfs_layout_segment *freeme;
	int i;

	lockdep_assert_held(cinfo->lock);
restart:
	for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
		if (pnfs_generic_transfer_commit_list(&b->written, dst,
						      cinfo, 0)) {
			freeme = b->wlseg;
			b->wlseg = NULL;
			spin_unlock(cinfo->lock);
			pnfs_put_lseg(freeme);
			spin_lock(cinfo->lock);
			goto restart;
		}
	}
	cinfo->ds->nwritten = 0;
}
EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);
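
/*
 * Put the requests still sitting on committing lists from bucket @idx
 * onward back on the commit lists for another attempt, dropping each
 * bucket's commit lseg reference.
 */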
static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx)
{
	struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
	struct pnfs_commit_bucket *bucket;
	struct pnfs_layout_segment *freeme;
	int i;

	for (i = idx; i < fl_cinfo->nbuckets; i++) {
		bucket = &fl_cinfo->buckets[i];
		if (list_empty(&bucket->committing))
			continue;
		nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo, i);
		spin_lock(cinfo->lock);
		freeme = bucket->clseg;
		bucket->clseg = NULL;
		spin_unlock(cinfo->lock);
		pnfs_put_lseg(freeme);
	}
}
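
/*
 * Allocate one nfs_commit_data per non-empty bucket and queue it on @list,
 * moving the bucket's commit lseg reference into the commit data.  On
 * allocation failure the remaining buckets are handed back to
 * pnfs_generic_retry_commit.
 */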
static unsigned int
pnfs_generic_alloc_ds_commits(struct nfs_commit_info *cinfo,
			      struct list_head *list)
{
	struct pnfs_ds_commit_info *fl_cinfo;
	struct pnfs_commit_bucket *bucket;
	struct nfs_commit_data *data;
	int i;
	unsigned int nreq = 0;

	fl_cinfo = cinfo->ds;
	bucket = fl_cinfo->buckets;
	for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
		if (list_empty(&bucket->committing))
			continue;
		data = nfs_commitdata_alloc();
		if (!data)
			break;
		data->ds_commit_index = i;
		spin_lock(cinfo->lock);
		data->lseg = bucket->clseg;
		bucket->clseg = NULL;
		spin_unlock(cinfo->lock);
		list_add(&data->pages, list);
		nreq++;
	}

	/* Clean up on error */
	pnfs_generic_retry_commit(cinfo, i);
	return nreq;
}

/* This follows nfs_commit_list pretty closely */
int
pnfs_generic_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			     int how, struct nfs_commit_info *cinfo,
			     int (*initiate_commit)(struct nfs_commit_data *data,
						    int how))
{
	struct nfs_commit_data *data, *tmp;
	LIST_HEAD(list);
	unsigned int nreq = 0;

	if (!list_empty(mds_pages)) {
		data = nfs_commitdata_alloc();
		if (data != NULL) {
			data->lseg = NULL;
			list_add(&data->pages, &list);
			nreq++;
		} else {
			nfs_retry_commit(mds_pages, NULL, cinfo, 0);
			pnfs_generic_retry_commit(cinfo, 0);
			cinfo->completion_ops->error_cleanup(NFS_I(inode));
			return -ENOMEM;
		}
	}

	nreq += pnfs_generic_alloc_ds_commits(cinfo, &list);

	if (nreq == 0) {
		cinfo->completion_ops->error_cleanup(NFS_I(inode));
		goto out;
	}

	atomic_add(nreq, &cinfo->mds->rpcs_out);

	list_for_each_entry_safe(data, tmp, &list, pages) {
		list_del_init(&data->pages);
		if (!data->lseg) {
			nfs_init_commit(data, mds_pages, NULL, cinfo);
			nfs_initiate_commit(NFS_CLIENT(inode), data,
					    NFS_PROTO(data->inode),
					    data->mds_ops, how, 0);
		} else {
			struct pnfs_commit_bucket *buckets;

			buckets = cinfo->ds->buckets;
			nfs_init_commit(data,
					&buckets[data->ds_commit_index].committing,
					data->lseg,
					cinfo);
			initiate_commit(data, how);
		}
	}
out:
	cinfo->ds->ncommitting = 0;
	return PNFS_ATTEMPTED;
}
EXPORT_SYMBOL_GPL(pnfs_generic_commit_pagelist);

/*
 * Data server cache
 *
 * Data servers can be mapped to different device ids.
 * nfs4_pnfs_ds reference counting
 *   - set to 1 on allocation
 *   - incremented when a device id maps a data server already in the cache.
 *   - decremented when deviceid is removed from the cache.
 */
static DEFINE_SPINLOCK(nfs4_ds_cache_lock);
static LIST_HEAD(nfs4_data_server_cache);

/* Debug routines */
static void
print_ds(struct nfs4_pnfs_ds *ds)
{
	if (ds == NULL) {
		printk(KERN_WARNING "%s NULL device\n", __func__);
		return;
	}
	printk(KERN_WARNING "        ds %s\n"
		"        ref count %d\n"
		"        client %p\n"
		"        cl_exchange_flags %x\n",
		ds->ds_remotestr,
		atomic_read(&ds->ds_count), ds->ds_clp,
		ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
}
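
/*
 * Compare two socket addresses for equality: family, address and port must
 * all match (link-local IPv6 addresses must also agree on scope_id).
 */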
static bool
same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2)
{
	struct sockaddr_in *a, *b;
	struct sockaddr_in6 *a6, *b6;

	if (addr1->sa_family != addr2->sa_family)
		return false;

	switch (addr1->sa_family) {
	case AF_INET:
		a = (struct sockaddr_in *)addr1;
		b = (struct sockaddr_in *)addr2;

		if (a->sin_addr.s_addr == b->sin_addr.s_addr &&
		    a->sin_port == b->sin_port)
			return true;
		break;

	case AF_INET6:
		a6 = (struct sockaddr_in6 *)addr1;
		b6 = (struct sockaddr_in6 *)addr2;

		/* LINKLOCAL addresses must have matching scope_id */
		if (ipv6_addr_src_scope(&a6->sin6_addr) ==
		    IPV6_ADDR_SCOPE_LINKLOCAL &&
		    a6->sin6_scope_id != b6->sin6_scope_id)
			return false;

		if (ipv6_addr_equal(&a6->sin6_addr, &b6->sin6_addr) &&
		    a6->sin6_port == b6->sin6_port)
			return true;
		break;

	default:
		dprintk("%s: unhandled address family: %u\n",
			__func__, addr1->sa_family);
		return false;
	}

	return false;
}

static bool
_same_data_server_addrs_locked(const struct list_head *dsaddrs1,
			       const struct list_head *dsaddrs2)
{
	struct nfs4_pnfs_ds_addr *da1, *da2;

	/* step through both lists, comparing as we go */
	for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node),
	     da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node);
	     da1 != NULL && da2 != NULL;
	     da1 = list_entry(da1->da_node.next, typeof(*da1), da_node),
	     da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) {
		if (!same_sockaddr((struct sockaddr *)&da1->da_addr,
				   (struct sockaddr *)&da2->da_addr))
			return false;
	}
	if (da1 == NULL && da2 == NULL)
		return true;

	return false;
}

/*
 * Lookup DS by addresses.  nfs4_ds_cache_lock is held
 */
static struct nfs4_pnfs_ds *
_data_server_lookup_locked(const struct list_head *dsaddrs)
{
	struct nfs4_pnfs_ds *ds;

	list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
		if (_same_data_server_addrs_locked(&ds->ds_addrs, dsaddrs))
			return ds;
	return NULL;
}
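
/*
 * Free a data server entry: drop its nfs_client and release every address
 * on its ds_addrs list.  Called once the last ds_count reference is gone.
 */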
static void destroy_ds(struct nfs4_pnfs_ds *ds)
{
	struct nfs4_pnfs_ds_addr *da;

	dprintk("--> %s\n", __func__);
	ifdebug(FACILITY)
		print_ds(ds);

	nfs_put_client(ds->ds_clp);

	while (!list_empty(&ds->ds_addrs)) {
		da = list_first_entry(&ds->ds_addrs,
				      struct nfs4_pnfs_ds_addr,
				      da_node);
		list_del_init(&da->da_node);
		kfree(da->da_remotestr);
		kfree(da);
	}

	kfree(ds->ds_remotestr);
	kfree(ds);
}
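
/*
 * Drop a reference to a cached data server.  On the final put the entry is
 * unhashed from the cache and destroyed.
 */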
void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
{
	if (atomic_dec_and_lock(&ds->ds_count,
				&nfs4_ds_cache_lock)) {
		list_del_init(&ds->ds_node);
		spin_unlock(&nfs4_ds_cache_lock);
		destroy_ds(ds);
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_put);

/*
 * Create a string with a human readable address and port to avoid
 * complicated setup around many dprintks.
 */
static char *
nfs4_pnfs_remotestr(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da;
	char *remotestr;
	size_t len;
	char *p;

	len = 3;	/* '{', '}' and eol */
	list_for_each_entry(da, dsaddrs, da_node) {
		len += strlen(da->da_remotestr) + 1;	/* string plus comma */
	}

	remotestr = kzalloc(len, gfp_flags);
	if (!remotestr)
		return NULL;

	p = remotestr;
	*(p++) = '{';
	len--;
	list_for_each_entry(da, dsaddrs, da_node) {
		size_t ll = strlen(da->da_remotestr);

		if (ll > len)
			goto out_err;

		memcpy(p, da->da_remotestr, ll);
		p += ll;
		len -= ll;

		if (len < 1)
			goto out_err;
		(*p++) = ',';
		len--;
	}
	if (len < 2)
		goto out_err;
	*(p++) = '}';
	*p = '\0';
	return remotestr;

out_err:
	kfree(remotestr);
	return NULL;
}

/*
 * Given a list of multipath struct nfs4_pnfs_ds_addr, add it to ds cache if
 * uncached and return cached struct nfs4_pnfs_ds.
 */
struct nfs4_pnfs_ds *
nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds *tmp_ds, *ds = NULL;
	char *remotestr;

	if (list_empty(dsaddrs)) {
		dprintk("%s: no addresses defined\n", __func__);
		goto out;
	}

	ds = kzalloc(sizeof(*ds), gfp_flags);
	if (!ds)
		goto out;

	/* this is only used for debugging, so it's OK if it's NULL */
	remotestr = nfs4_pnfs_remotestr(dsaddrs, gfp_flags);

	spin_lock(&nfs4_ds_cache_lock);
	tmp_ds = _data_server_lookup_locked(dsaddrs);
	if (tmp_ds == NULL) {
		INIT_LIST_HEAD(&ds->ds_addrs);
		list_splice_init(dsaddrs, &ds->ds_addrs);
		ds->ds_remotestr = remotestr;
		atomic_set(&ds->ds_count, 1);
		INIT_LIST_HEAD(&ds->ds_node);
		ds->ds_clp = NULL;
		list_add(&ds->ds_node, &nfs4_data_server_cache);
		dprintk("%s add new data server %s\n", __func__,
			ds->ds_remotestr);
	} else {
		kfree(remotestr);
		kfree(ds);
		atomic_inc(&tmp_ds->ds_count);
		dprintk("%s data server %s found, inc'ed ds_count to %d\n",
			__func__, tmp_ds->ds_remotestr,
			atomic_read(&tmp_ds->ds_count));
		ds = tmp_ds;
	}
	spin_unlock(&nfs4_ds_cache_lock);
out:
	return ds;
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_add);
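
/*
 * NFS4DS_CONNECTING serializes connection setup: the first caller to set
 * the bit performs the connect, everyone else waits for it to be cleared.
 */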
static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
{
	might_sleep();
	wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
		    TASK_KILLABLE);
}

static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
{
	smp_mb__before_atomic();
	clear_bit(NFS4DS_CONNECTING, &ds->ds_state);
	smp_mb__after_atomic();
	wake_up_bit(&ds->ds_state, NFS4DS_CONNECTING);
}
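
/*
 * nfs3_set_ds_client lives in the NFSv3 module; resolve it at run time via
 * symbol_request() so this code does not create a hard module dependency.
 */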
static struct nfs_client *(*get_v3_ds_connect)(
			struct nfs_client *mds_clp,
			const struct sockaddr *ds_addr,
			int ds_addrlen,
			int ds_proto,
			unsigned int ds_timeo,
			unsigned int ds_retrans,
			rpc_authflavor_t au_flavor);

static bool load_v3_ds_connect(void)
{
	if (!get_v3_ds_connect) {
		get_v3_ds_connect = symbol_request(nfs3_set_ds_client);
		WARN_ON_ONCE(!get_v3_ds_connect);
	}

	return (get_v3_ds_connect != NULL);
}

void nfs4_pnfs_v3_ds_connect_unload(void)
{
	if (get_v3_ds_connect) {
		symbol_put(nfs3_set_ds_client);
		get_v3_ds_connect = NULL;
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_v3_ds_connect_unload);
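
/*
 * Try each address of the data server in turn until an NFSv3 client is
 * obtained, then publish it in ds->ds_clp.
 */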
static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
				 struct nfs4_pnfs_ds *ds,
				 unsigned int timeo,
				 unsigned int retrans,
				 rpc_authflavor_t au_flavor)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	int status = 0;

	dprintk("--> %s DS %s au_flavor %d\n", __func__,
		ds->ds_remotestr, au_flavor);

	if (!load_v3_ds_connect())
		goto out;

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		clp = get_v3_ds_connect(mds_srv->nfs_client,
				(struct sockaddr *)&da->da_addr,
				da->da_addrlen, IPPROTO_TCP,
				timeo, retrans, au_flavor);
		if (!IS_ERR(clp))
			break;
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	smp_wmb();
	ds->ds_clp = clp;
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
}
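
/*
 * Like the v3 variant, but also establishes an NFSv4 session with the data
 * server before publishing ds->ds_clp.
 */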
static int _nfs4_pnfs_v4_ds_connect(struct nfs_server *mds_srv,
				 struct nfs4_pnfs_ds *ds,
				 unsigned int timeo,
				 unsigned int retrans,
				 u32 minor_version,
				 rpc_authflavor_t au_flavor)
{
	struct nfs_client *clp = ERR_PTR(-EIO);
	struct nfs4_pnfs_ds_addr *da;
	int status = 0;

	dprintk("--> %s DS %s au_flavor %d\n", __func__, ds->ds_remotestr,
		au_flavor);

	list_for_each_entry(da, &ds->ds_addrs, da_node) {
		dprintk("%s: DS %s: trying address %s\n",
			__func__, ds->ds_remotestr, da->da_remotestr);

		clp = nfs4_set_ds_client(mds_srv->nfs_client,
					(struct sockaddr *)&da->da_addr,
					da->da_addrlen, IPPROTO_TCP,
					timeo, retrans, minor_version,
					au_flavor);
		if (!IS_ERR(clp))
			break;
	}

	if (IS_ERR(clp)) {
		status = PTR_ERR(clp);
		goto out;
	}

	status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
	if (status)
		goto out_put;

	smp_wmb();
	ds->ds_clp = clp;
	dprintk("%s [new] addr: %s\n", __func__, ds->ds_remotestr);
out:
	return status;
out_put:
	nfs_put_client(clp);
	goto out;
}

/*
 * Create an rpc connection to the nfs4_pnfs_ds data server.
 * Currently only supports IPv4 and IPv6 addresses.
 * If connection fails, make devid unavailable.
 */
void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
			  struct nfs4_deviceid_node *devid, unsigned int timeo,
			  unsigned int retrans, u32 version,
			  u32 minor_version, rpc_authflavor_t au_flavor)
{
	if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) {
		int err = 0;

		if (version == 3) {
			err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo,
						       retrans, au_flavor);
		} else if (version == 4) {
			err = _nfs4_pnfs_v4_ds_connect(mds_srv, ds, timeo,
						       retrans, minor_version,
						       au_flavor);
		} else {
			dprintk("%s: unsupported DS version %d\n", __func__,
				version);
			err = -EPROTONOSUPPORT;
		}
		if (err)
			nfs4_mark_deviceid_unavailable(devid);
		nfs4_clear_ds_conn_bit(ds);
	} else {
		nfs4_wait_ds_connect(ds);
	}
}
EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect);

/*
 * Currently only supports ipv4, ipv6 and one multi-path address.
 */
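/*
 * The r_addr string carries the port as two trailing decimal octets, so the
 * port is (p1 << 8) | p2.  For example, "10.1.2.3.8.1" parses to address
 * 10.1.2.3 with port 8 * 256 + 1 = 2049.
 */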
struct nfs4_pnfs_ds_addr *
nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags)
{
	struct nfs4_pnfs_ds_addr *da = NULL;
	char *buf, *portstr;
	__be16 port;
	int nlen, rlen;
	int tmp[2];
	__be32 *p;
	char *netid, *match_netid;
	size_t len, match_netid_len;
	char *startsep = "";
	char *endsep = "";

	/* r_netid */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_err;
	nlen = be32_to_cpup(p++);

	p = xdr_inline_decode(xdr, nlen);
	if (unlikely(!p))
		goto out_err;

	netid = kmalloc(nlen+1, gfp_flags);
	if (unlikely(!netid))
		goto out_err;

	netid[nlen] = '\0';
	memcpy(netid, p, nlen);

	/* r_addr: ip/ip6addr with port in dec octets - see RFC 5665 */
	p = xdr_inline_decode(xdr, 4);
	if (unlikely(!p))
		goto out_free_netid;
	rlen = be32_to_cpup(p);

	p = xdr_inline_decode(xdr, rlen);
	if (unlikely(!p))
		goto out_free_netid;

	/* port is ".ABC.DEF", 8 chars max */
	if (rlen > INET6_ADDRSTRLEN + IPV6_SCOPE_ID_LEN + 8) {
		dprintk("%s: Invalid address, length %d\n", __func__,
			rlen);
		goto out_free_netid;
	}
	buf = kmalloc(rlen + 1, gfp_flags);
	if (!buf) {
		dprintk("%s: Not enough memory\n", __func__);
		goto out_free_netid;
	}
	buf[rlen] = '\0';
	memcpy(buf, p, rlen);

	/* replace port '.' with '-' */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot in port\n",
			__func__);
		goto out_free_buf;
	}
	*portstr = '-';

	/* find '.' between address and port */
	portstr = strrchr(buf, '.');
	if (!portstr) {
		dprintk("%s: Failed finding expected dot between address and "
			"port\n", __func__);
		goto out_free_buf;
	}
	*portstr = '\0';

	da = kzalloc(sizeof(*da), gfp_flags);
	if (unlikely(!da))
		goto out_free_buf;

	INIT_LIST_HEAD(&da->da_node);

	if (!rpc_pton(net, buf, portstr-buf, (struct sockaddr *)&da->da_addr,
		      sizeof(da->da_addr))) {
		dprintk("%s: error parsing address %s\n", __func__, buf);
		goto out_free_da;
	}

	portstr++;
	sscanf(portstr, "%d-%d", &tmp[0], &tmp[1]);
	port = htons((tmp[0] << 8) | (tmp[1]));

	switch (da->da_addr.ss_family) {
	case AF_INET:
		((struct sockaddr_in *)&da->da_addr)->sin_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in);
		match_netid = "tcp";
		match_netid_len = 3;
		break;

	case AF_INET6:
		((struct sockaddr_in6 *)&da->da_addr)->sin6_port = port;
		da->da_addrlen = sizeof(struct sockaddr_in6);
		match_netid = "tcp6";
		match_netid_len = 4;
		startsep = "[";
		endsep = "]";
		break;

	default:
		dprintk("%s: unsupported address family: %u\n",
			__func__, da->da_addr.ss_family);
		goto out_free_da;
	}

	if (nlen != match_netid_len || strncmp(netid, match_netid, nlen)) {
		dprintk("%s: ERROR: r_netid \"%s\" != \"%s\"\n",
			__func__, netid, match_netid);
		goto out_free_da;
	}

	/* save human readable address */
	len = strlen(startsep) + strlen(buf) + strlen(endsep) + 7;
	da->da_remotestr = kzalloc(len, gfp_flags);

	/* NULL is ok, only used for dprintk */
	if (da->da_remotestr)
		snprintf(da->da_remotestr, len, "%s%s%s:%u", startsep,
			 buf, endsep, ntohs(port));

	dprintk("%s: Parsed DS addr %s\n", __func__, da->da_remotestr);
	kfree(buf);
	kfree(netid);
	return da;

out_free_da:
	kfree(da);
out_free_buf:
	dprintk("%s: Error parsing DS addr: %s\n", __func__, buf);
	kfree(buf);
out_free_netid:
	kfree(netid);
out_err:
	return NULL;
}
EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
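
/*
 * Mark @req for commit to the data server selected by @ds_commit_idx and
 * add it to that bucket's written list.  The first request into an empty
 * bucket pins @lseg with an extra reference until the commit (or a clear)
 * releases it.
 */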
void
pnfs_layout_mark_request_commit(struct nfs_page *req,
				struct pnfs_layout_segment *lseg,
				struct nfs_commit_info *cinfo,
				u32 ds_commit_idx)
{
	struct list_head *list;
	struct pnfs_commit_bucket *buckets;

	spin_lock(cinfo->lock);
	buckets = cinfo->ds->buckets;
	list = &buckets[ds_commit_idx].written;
	if (list_empty(list)) {
		/* Non-empty buckets hold a reference on the lseg.  That ref
		 * is normally transferred to the COMMIT call and released
		 * there.  It could also be released if the last req is pulled
		 * off due to a rewrite, in which case it will be done in
		 * pnfs_generic_clear_request_commit
		 */
		WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
		buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
	}
	set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
	cinfo->ds->nwritten++;
	spin_unlock(cinfo->lock);

	nfs_request_add_commit_list(req, list, cinfo);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
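
/*
 * fsync helper shared by the pnfs layout drivers: a plain datasync needs no
 * layoutcommit, otherwise send a synchronous LAYOUTCOMMIT to the MDS.
 */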
int
pnfs_nfs_generic_sync(struct inode *inode, bool datasync)
{
	if (datasync)
		return 0;
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_nfs_generic_sync);