// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 IT University of Copenhagen. All rights reserved.
 * Initial release: Matias Bjorling <m@bjorling.me>
 */

#define pr_fmt(fmt) "nvm: " fmt

#include <linux/list.h>
#include <linux/types.h>
#include <linux/sem.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/lightnvm.h>
#include <linux/sched/sysctl.h>

static LIST_HEAD(nvm_tgt_types);
static DECLARE_RWSEM(nvm_tgtt_lock);
static LIST_HEAD(nvm_devices);
static DECLARE_RWSEM(nvm_lock);

/* Map between virtual and physical channel and lun */
struct nvm_ch_map {
	int ch_off;
	int num_lun;
	int *lun_offs;
};

struct nvm_dev_map {
	struct nvm_ch_map *chnls;
	int num_ch;
};

static void nvm_free(struct kref *ref);

static struct nvm_target *nvm_find_target(struct nvm_dev *dev, const char *name)
{
	struct nvm_target *tgt;

	list_for_each_entry(tgt, &dev->targets, list)
		if (!strcmp(name, tgt->disk->disk_name))
			return tgt;

	return NULL;
}

static bool nvm_target_exists(const char *name)
{
	struct nvm_dev *dev;
	struct nvm_target *tgt;
	bool ret = false;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		list_for_each_entry(tgt, &dev->targets, list) {
			if (!strcmp(name, tgt->disk->disk_name)) {
				ret = true;
				mutex_unlock(&dev->mlock);
				goto out;
			}
		}
		mutex_unlock(&dev->mlock);
	}
out:
	up_write(&nvm_lock);
	return ret;
}
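
/*
 * Reserve a contiguous range of LUNs in the parent device's lun_map
 * bitmap so that no two targets claim the same LUN. On conflict, the
 * bits already taken by this call are rolled back and -EBUSY returned.
 */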
static int nvm_reserve_luns(struct nvm_dev *dev, int lun_begin, int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++) {
		if (test_and_set_bit(i, dev->lun_map)) {
			pr_err("lun %d already allocated\n", i);
			goto err;
		}
	}

	return 0;

err:
	while (--i >= lun_begin)
		clear_bit(i, dev->lun_map);

	return -EBUSY;
}

static void nvm_release_luns_err(struct nvm_dev *dev, int lun_begin,
				 int lun_end)
{
	int i;

	for (i = lun_begin; i <= lun_end; i++)
		WARN_ON(!test_and_clear_bit(i, dev->lun_map));
}

static void nvm_remove_tgt_dev(struct nvm_tgt_dev *tgt_dev, int clear)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_map = tgt_dev->map;
	int i, j;

	for (i = 0; i < dev_map->num_ch; i++) {
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs = ch_map->lun_offs;
		int ch = i + ch_map->ch_off;

		if (clear) {
			for (j = 0; j < ch_map->num_lun; j++) {
				int lun = j + lun_offs[j];
				int lunid = (ch * dev->geo.num_lun) + lun;

				WARN_ON(!test_and_clear_bit(lunid,
							    dev->lun_map));
			}
		}

		kfree(ch_map->lun_offs);
	}

	kfree(dev_map->chnls);
	kfree(dev_map);

	kfree(tgt_dev->luns);
	kfree(tgt_dev);
}
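
/*
 * Build the target's private view of the device: a copy of the parent
 * geometry trimmed to the reserved LUN range, a forward channel/LUN
 * offset map used to translate target addresses to device addresses,
 * and the list of physical LUNs owned by the target.
 */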
static struct nvm_tgt_dev *nvm_create_tgt_dev(struct nvm_dev *dev,
					      u16 lun_begin, u16 lun_end,
					      u16 op)
{
	struct nvm_tgt_dev *tgt_dev = NULL;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_dev_map *dev_map;
	struct ppa_addr *luns;
	int num_lun = lun_end - lun_begin + 1;
	int luns_left = num_lun;
	int num_ch = num_lun / dev->geo.num_lun;
	int num_ch_mod = num_lun % dev->geo.num_lun;
	int bch = lun_begin / dev->geo.num_lun;
	int blun = lun_begin % dev->geo.num_lun;
	int lunid = 0;
	int lun_balanced = 1;
	int sec_per_lun, prev_num_lun;
	int i, j;

	num_ch = (num_ch_mod == 0) ? num_ch : num_ch + 1;

	dev_map = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!dev_map)
		goto err_dev;

	dev_map->chnls = kcalloc(num_ch, sizeof(struct nvm_ch_map), GFP_KERNEL);
	if (!dev_map->chnls)
		goto err_chnls;

	luns = kcalloc(num_lun, sizeof(struct ppa_addr), GFP_KERNEL);
	if (!luns)
		goto err_luns;

	prev_num_lun = (luns_left > dev->geo.num_lun) ?
			dev->geo.num_lun : luns_left;
	for (i = 0; i < num_ch; i++) {
		struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[i + bch];
		int *lun_roffs = ch_rmap->lun_offs;
		struct nvm_ch_map *ch_map = &dev_map->chnls[i];
		int *lun_offs;
		int luns_in_chnl = (luns_left > dev->geo.num_lun) ?
					dev->geo.num_lun : luns_left;

		if (lun_balanced && prev_num_lun != luns_in_chnl)
			lun_balanced = 0;

		ch_map->ch_off = ch_rmap->ch_off = bch;
		ch_map->num_lun = luns_in_chnl;

		lun_offs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_offs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++) {
			luns[lunid].ppa = 0;
			luns[lunid].a.ch = i;
			luns[lunid++].a.lun = j;

			lun_offs[j] = blun;
			lun_roffs[j + blun] = blun;
		}

		ch_map->lun_offs = lun_offs;

		/* when starting a new channel, lun offset is reset */
		blun = 0;
		luns_left -= luns_in_chnl;
	}

	dev_map->num_ch = num_ch;

	tgt_dev = kmalloc(sizeof(struct nvm_tgt_dev), GFP_KERNEL);
	if (!tgt_dev)
		goto err_ch;

	/* Inherit device geometry from parent */
	memcpy(&tgt_dev->geo, &dev->geo, sizeof(struct nvm_geo));

	/* Target device only owns a portion of the physical device */
	tgt_dev->geo.num_ch = num_ch;
	tgt_dev->geo.num_lun = (lun_balanced) ? prev_num_lun : -1;
	tgt_dev->geo.all_luns = num_lun;
	tgt_dev->geo.all_chunks = num_lun * dev->geo.num_chk;

	tgt_dev->geo.op = op;

	sec_per_lun = dev->geo.clba * dev->geo.num_chk;
	tgt_dev->geo.total_secs = num_lun * sec_per_lun;

	tgt_dev->q = dev->q;
	tgt_dev->map = dev_map;
	tgt_dev->luns = luns;
	tgt_dev->parent = dev;

	return tgt_dev;
err_ch:
	while (--i >= 0)
		kfree(dev_map->chnls[i].lun_offs);
	kfree(luns);
err_luns:
	kfree(dev_map->chnls);
err_chnls:
	kfree(dev_map);
err_dev:
	return tgt_dev;
}
static const struct block_device_operations nvm_fops = {
	.owner		= THIS_MODULE,
};

static struct nvm_tgt_type *__nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	list_for_each_entry(tt, &nvm_tgt_types, list)
		if (!strcmp(name, tt->name))
			return tt;

	return NULL;
}

static struct nvm_tgt_type *nvm_find_target_type(const char *name)
{
	struct nvm_tgt_type *tt;

	down_write(&nvm_tgtt_lock);
	tt = __nvm_find_target_type(name);
	up_write(&nvm_tgtt_lock);

	return tt;
}

static int nvm_config_check_luns(struct nvm_geo *geo, int lun_begin,
				 int lun_end)
{
	if (lun_begin > lun_end || lun_end >= geo->all_luns) {
		pr_err("lun out of bound (%u:%u > %u)\n",
			lun_begin, lun_end, geo->all_luns - 1);
		return -EINVAL;
	}

	return 0;
}

static int __nvm_config_simple(struct nvm_dev *dev,
			       struct nvm_ioctl_create_simple *s)
{
	struct nvm_geo *geo = &dev->geo;

	if (s->lun_begin == -1 && s->lun_end == -1) {
		s->lun_begin = 0;
		s->lun_end = geo->all_luns - 1;
	}

	return nvm_config_check_luns(geo, s->lun_begin, s->lun_end);
}

static int __nvm_config_extended(struct nvm_dev *dev,
				 struct nvm_ioctl_create_extended *e)
{
	if (e->lun_begin == 0xFFFF && e->lun_end == 0xFFFF) {
		e->lun_begin = 0;
		e->lun_end = dev->geo.all_luns - 1;
	}

	/* op not set falls into target's default */
	if (e->op == 0xFFFF) {
		e->op = NVM_TARGET_DEFAULT_OP;
	} else if (e->op < NVM_TARGET_MIN_OP || e->op > NVM_TARGET_MAX_OP) {
		pr_err("invalid over provisioning value\n");
		return -EINVAL;
	}

	return nvm_config_check_luns(&dev->geo, e->lun_begin, e->lun_end);
}
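
/*
 * Create a target on top of a LUN range of the device: normalize the
 * ioctl configuration, look up the target type, reserve the LUNs,
 * build the target device, allocate the gendisk and request queue,
 * and finally hand control to the target's init callback.
 */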
static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
{
	struct nvm_ioctl_create_extended e;
	struct request_queue *tqueue;
	struct gendisk *tdisk;
	struct nvm_tgt_type *tt;
	struct nvm_target *t;
	struct nvm_tgt_dev *tgt_dev;
	void *targetdata;
	unsigned int mdts;
	int ret;

	switch (create->conf.type) {
	case NVM_CONFIG_TYPE_SIMPLE:
		ret = __nvm_config_simple(dev, &create->conf.s);
		if (ret)
			return ret;

		e.lun_begin = create->conf.s.lun_begin;
		e.lun_end = create->conf.s.lun_end;
		e.op = NVM_TARGET_DEFAULT_OP;
		break;
	case NVM_CONFIG_TYPE_EXTENDED:
		ret = __nvm_config_extended(dev, &create->conf.e);
		if (ret)
			return ret;

		e = create->conf.e;
		break;
	default:
		pr_err("config type not valid\n");
		return -EINVAL;
	}

	tt = nvm_find_target_type(create->tgttype);
	if (!tt) {
		pr_err("target type %s not found\n", create->tgttype);
		return -EINVAL;
	}

	if ((tt->flags & NVM_TGT_F_HOST_L2P) != (dev->geo.dom & NVM_RSP_L2P)) {
		pr_err("device is incompatible with target L2P type.\n");
		return -EINVAL;
	}

	if (nvm_target_exists(create->tgtname)) {
		pr_err("target name already exists (%s)\n",
			create->tgtname);
		return -EINVAL;
	}

	ret = nvm_reserve_luns(dev, e.lun_begin, e.lun_end);
	if (ret)
		return ret;

	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_reserve;
	}

	tgt_dev = nvm_create_tgt_dev(dev, e.lun_begin, e.lun_end, e.op);
	if (!tgt_dev) {
		pr_err("could not create target device\n");
		ret = -ENOMEM;
		goto err_t;
	}

	tdisk = alloc_disk(0);
	if (!tdisk) {
		ret = -ENOMEM;
		goto err_dev;
	}

	tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
	if (!tqueue) {
		ret = -ENOMEM;
		goto err_disk;
	}
	blk_queue_make_request(tqueue, tt->make_rq);

	strlcpy(tdisk->disk_name, create->tgtname, sizeof(tdisk->disk_name));
	tdisk->flags = GENHD_FL_EXT_DEVT;
	tdisk->major = 0;
	tdisk->first_minor = 0;
	tdisk->fops = &nvm_fops;
	tdisk->queue = tqueue;

	targetdata = tt->init(tgt_dev, tdisk, create->flags);
	if (IS_ERR(targetdata)) {
		ret = PTR_ERR(targetdata);
		goto err_init;
	}

	tdisk->private_data = targetdata;
	tqueue->queuedata = targetdata;

	mdts = (dev->geo.csecs >> 9) * NVM_MAX_VLBA;
	if (dev->geo.mdts) {
		mdts = min_t(u32, dev->geo.mdts,
				(dev->geo.csecs >> 9) * NVM_MAX_VLBA);
	}
	blk_queue_max_hw_sectors(tqueue, mdts);

	set_capacity(tdisk, tt->capacity(targetdata));
	add_disk(tdisk);

	if (tt->sysfs_init && tt->sysfs_init(tdisk)) {
		ret = -ENOMEM;
		goto err_sysfs;
	}

	t->type = tt;
	t->disk = tdisk;
	t->dev = tgt_dev;

	mutex_lock(&dev->mlock);
	list_add_tail(&t->list, &dev->targets);
	mutex_unlock(&dev->mlock);

	__module_get(tt->owner);

	return 0;
err_sysfs:
	if (tt->exit)
		tt->exit(targetdata, true);
err_init:
	blk_cleanup_queue(tqueue);
	tdisk->queue = NULL;
err_disk:
	put_disk(tdisk);
err_dev:
	nvm_remove_tgt_dev(tgt_dev, 0);
err_t:
	kfree(t);
err_reserve:
	nvm_release_luns_err(dev, e.lun_begin, e.lun_end);
	return ret;
}
static void __nvm_remove_target(struct nvm_target *t, bool graceful)
{
	struct nvm_tgt_type *tt = t->type;
	struct gendisk *tdisk = t->disk;
	struct request_queue *q = tdisk->queue;

	del_gendisk(tdisk);
	blk_cleanup_queue(q);

	if (tt->sysfs_exit)
		tt->sysfs_exit(tdisk);

	if (tt->exit)
		tt->exit(tdisk->private_data, graceful);

	nvm_remove_tgt_dev(t->dev, 1);
	put_disk(tdisk);
	module_put(t->type->owner);

	list_del(&t->list);
	kfree(t);
}

/**
 * nvm_remove_tgt - Removes a target from the media manager
 * @remove:	ioctl structure with target name to remove.
 *
 * Returns:
 * 0: on success
 * 1: on not found
 * <0: on error
 */
static int nvm_remove_tgt(struct nvm_ioctl_remove *remove)
{
	struct nvm_target *t = NULL;
	struct nvm_dev *dev;

	down_read(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		mutex_lock(&dev->mlock);
		t = nvm_find_target(dev, remove->tgtname);
		if (t) {
			mutex_unlock(&dev->mlock);
			break;
		}
		mutex_unlock(&dev->mlock);
	}
	up_read(&nvm_lock);

	if (!t) {
		pr_err("failed to remove target %s\n",
				remove->tgtname);
		return 1;
	}

	__nvm_remove_target(t, true);
	kref_put(&dev->ref, nvm_free);

	return 0;
}
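
/*
 * Allocate the device's reverse map (device to target view). All
 * channel and LUN offsets start out at -1 and are filled in when a
 * target claims a LUN range in nvm_create_tgt_dev().
 */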
static int nvm_register_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap;
	int i, j;

	rmap = kmalloc(sizeof(struct nvm_dev_map), GFP_KERNEL);
	if (!rmap)
		goto err_rmap;

	rmap->chnls = kcalloc(dev->geo.num_ch, sizeof(struct nvm_ch_map),
			      GFP_KERNEL);
	if (!rmap->chnls)
		goto err_chnls;

	for (i = 0; i < dev->geo.num_ch; i++) {
		struct nvm_ch_map *ch_rmap;
		int *lun_roffs;
		int luns_in_chnl = dev->geo.num_lun;

		ch_rmap = &rmap->chnls[i];

		ch_rmap->ch_off = -1;
		ch_rmap->num_lun = luns_in_chnl;

		lun_roffs = kcalloc(luns_in_chnl, sizeof(int), GFP_KERNEL);
		if (!lun_roffs)
			goto err_ch;

		for (j = 0; j < luns_in_chnl; j++)
			lun_roffs[j] = -1;

		ch_rmap->lun_offs = lun_roffs;
	}

	dev->rmap = rmap;

	return 0;
err_ch:
	while (--i >= 0)
		kfree(rmap->chnls[i].lun_offs);
err_chnls:
	kfree(rmap);
err_rmap:
	return -ENOMEM;
}

static void nvm_unregister_map(struct nvm_dev *dev)
{
	struct nvm_dev_map *rmap = dev->rmap;
	int i;

	for (i = 0; i < dev->geo.num_ch; i++)
		kfree(rmap->chnls[i].lun_offs);

	kfree(rmap->chnls);
	kfree(rmap);
}
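
/*
 * Address translation between the target's logical view and the
 * device's physical view: the forward map (tgt_dev->map) adds the
 * channel/LUN offsets, the reverse map (dev->rmap) subtracts them.
 */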
static void nvm_map_to_dev(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev_map *dev_map = tgt_dev->map;
	struct nvm_ch_map *ch_map = &dev_map->chnls[p->a.ch];
	int lun_off = ch_map->lun_offs[p->a.lun];

	p->a.ch += ch_map->ch_off;
	p->a.lun += lun_off;
}

static void nvm_map_to_tgt(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *p)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_dev_map *dev_rmap = dev->rmap;
	struct nvm_ch_map *ch_rmap = &dev_rmap->chnls[p->a.ch];
	int lun_roff = ch_rmap->lun_offs[p->a.lun];

	p->a.ch -= ch_rmap->ch_off;
	p->a.lun -= lun_roff;
}

static void nvm_ppa_tgt_to_dev(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		nvm_map_to_dev(tgt_dev, &ppa_list[i]);
		ppa_list[i] = generic_to_dev_addr(tgt_dev->parent, ppa_list[i]);
	}
}

static void nvm_ppa_dev_to_tgt(struct nvm_tgt_dev *tgt_dev,
			       struct ppa_addr *ppa_list, int nr_ppas)
{
	int i;

	for (i = 0; i < nr_ppas; i++) {
		ppa_list[i] = dev_to_generic_addr(tgt_dev->parent, ppa_list[i]);
		nvm_map_to_tgt(tgt_dev, &ppa_list[i]);
	}
}

static void nvm_rq_tgt_to_dev(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_tgt_to_dev(tgt_dev, ppa_list, rqd->nr_ppas);
}

static void nvm_rq_dev_to_tgt(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd)
{
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	nvm_ppa_dev_to_tgt(tgt_dev, ppa_list, rqd->nr_ppas);
}
int nvm_register_tgt_type(struct nvm_tgt_type *tt)
{
	int ret = 0;

	down_write(&nvm_tgtt_lock);
	if (__nvm_find_target_type(tt->name))
		ret = -EEXIST;
	else
		list_add(&tt->list, &nvm_tgt_types);
	up_write(&nvm_tgtt_lock);

	return ret;
}
EXPORT_SYMBOL(nvm_register_tgt_type);

void nvm_unregister_tgt_type(struct nvm_tgt_type *tt)
{
	if (!tt)
		return;

	down_write(&nvm_tgtt_lock);
	list_del(&tt->list);
	up_write(&nvm_tgtt_lock);
}
EXPORT_SYMBOL(nvm_unregister_tgt_type);

void *nvm_dev_dma_alloc(struct nvm_dev *dev, gfp_t mem_flags,
			dma_addr_t *dma_handler)
{
	return dev->ops->dev_dma_alloc(dev, dev->dma_pool, mem_flags,
								dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_alloc);

void nvm_dev_dma_free(struct nvm_dev *dev, void *addr, dma_addr_t dma_handler)
{
	dev->ops->dev_dma_free(dev->dma_pool, addr, dma_handler);
}
EXPORT_SYMBOL(nvm_dev_dma_free);

static struct nvm_dev *nvm_find_nvm_dev(const char *name)
{
	struct nvm_dev *dev;

	list_for_each_entry(dev, &nvm_devices, devices)
		if (!strcmp(name, dev->name))
			return dev;

	return NULL;
}
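
/*
 * Set up the PPA list of a request. A single-sector request on a
 * single-plane device is stored inline in rqd->ppa_addr; otherwise a
 * DMA-able list is allocated and each address is replicated across
 * all planes of the device.
 */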
static int nvm_set_rqd_ppalist(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
			       const struct ppa_addr *ppas, int nr_ppas)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_geo *geo = &tgt_dev->geo;
	int i, plane_cnt, pl_idx;
	struct ppa_addr ppa;

	if (geo->pln_mode == NVM_PLANE_SINGLE && nr_ppas == 1) {
		rqd->nr_ppas = nr_ppas;
		rqd->ppa_addr = ppas[0];

		return 0;
	}

	rqd->nr_ppas = nr_ppas;
	rqd->ppa_list = nvm_dev_dma_alloc(dev, GFP_KERNEL, &rqd->dma_ppa_list);
	if (!rqd->ppa_list) {
		pr_err("failed to allocate dma memory\n");
		return -ENOMEM;
	}

	plane_cnt = geo->pln_mode;
	rqd->nr_ppas *= plane_cnt;

	for (i = 0; i < nr_ppas; i++) {
		for (pl_idx = 0; pl_idx < plane_cnt; pl_idx++) {
			ppa = ppas[i];
			ppa.g.pl = pl_idx;
			rqd->ppa_list[(pl_idx * nr_ppas) + i] = ppa;
		}
	}

	return 0;
}

static void nvm_free_rqd_ppalist(struct nvm_tgt_dev *tgt_dev,
				 struct nvm_rq *rqd)
{
	if (!rqd->ppa_list)
		return;

	nvm_dev_dma_free(tgt_dev->parent, rqd->ppa_list, rqd->dma_ppa_list);
}

static int nvm_set_flags(struct nvm_geo *geo, struct nvm_rq *rqd)
{
	int flags = 0;

	if (geo->version == NVM_OCSSD_SPEC_20)
		return 0;

	if (rqd->is_seq)
		flags |= geo->pln_mode >> 1;

	if (rqd->opcode == NVM_OP_PREAD)
		flags |= (NVM_IO_SCRAMBLE_ENABLE | NVM_IO_SUSPEND);
	else if (rqd->opcode == NVM_OP_PWRITE)
		flags |= NVM_IO_SCRAMBLE_ENABLE;

	return flags;
}
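
/*
 * I/O submission. Addresses are converted from target to device
 * format and 1.2 command flags are derived before the request is
 * handed to the underlying driver; on completion (or on submission
 * failure) the addresses are converted back so the target always
 * sees its own address space.
 */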
int nvm_submit_io(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd, void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	/* In case of error, fail with right address format */
	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);
	return ret;
}
EXPORT_SYMBOL(nvm_submit_io);

static void nvm_sync_end_io(struct nvm_rq *rqd)
{
	struct completion *waiting = rqd->private;

	complete(waiting);
}

static int nvm_submit_io_wait(struct nvm_dev *dev, struct nvm_rq *rqd,
			      void *buf)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int ret = 0;

	rqd->end_io = nvm_sync_end_io;
	rqd->private = &wait;

	ret = dev->ops->submit_io(dev, rqd, buf);
	if (ret)
		return ret;

	wait_for_completion_io(&wait);

	return 0;
}

int nvm_submit_io_sync(struct nvm_tgt_dev *tgt_dev, struct nvm_rq *rqd,
		       void *buf)
{
	struct nvm_dev *dev = tgt_dev->parent;
	int ret;

	if (!dev->ops->submit_io)
		return -ENODEV;

	nvm_rq_tgt_to_dev(tgt_dev, rqd);

	rqd->dev = tgt_dev;
	rqd->flags = nvm_set_flags(&tgt_dev->geo, rqd);

	ret = nvm_submit_io_wait(dev, rqd, buf);

	return ret;
}
EXPORT_SYMBOL(nvm_submit_io_sync);

void nvm_end_io(struct nvm_rq *rqd)
{
	struct nvm_tgt_dev *tgt_dev = rqd->dev;

	/* Convert address space */
	if (tgt_dev)
		nvm_rq_dev_to_tgt(tgt_dev, rqd);

	if (rqd->end_io)
		rqd->end_io(rqd);
}
EXPORT_SYMBOL(nvm_end_io);

static int nvm_submit_io_sync_raw(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	if (!dev->ops->submit_io)
		return -ENODEV;

	rqd->dev = NULL;
	rqd->flags = nvm_set_flags(&dev->geo, rqd);

	return nvm_submit_io_wait(dev, rqd, NULL);
}
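
/*
 * Probe the state of a 1.2 page by issuing a single synchronous read
 * against it. A return value of 0 means valid data was read, a
 * positive value is the device status (e.g. empty page), and a
 * negative value is a submission error.
 */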
static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa)
{
	struct nvm_rq rqd = { NULL };
	struct bio bio;
	struct bio_vec bio_vec;
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	bio_init(&bio, &bio_vec, 1);
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	rqd.bio = &bio;
	rqd.opcode = NVM_OP_PREAD;
	rqd.is_seq = 1;
	rqd.nr_ppas = 1;
	rqd.ppa_addr = generic_to_dev_addr(dev, ppa);

	ret = nvm_submit_io_sync_raw(dev, &rqd);
	__free_page(page);
	if (ret)
		return ret;

	return rqd.error;
}
/*
 * Scans a 1.2 chunk's first and last page to determine its state.
 * If the chunk is found to be open, also scan it to update the write
 * pointer.
 */
static int nvm_bb_chunk_scan(struct nvm_dev *dev, struct ppa_addr ppa,
			     struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, pg, pl;

	/* sense first page */
	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) /* valid data */
		meta->state = NVM_CHK_ST_OPEN;
	else if (ret > 0) {
		/*
		 * If empty page, the chunk is free, else it is an
		 * actual io error. In that case, mark it offline.
		 */
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
			meta->state = NVM_CHK_ST_FREE;
			return 0;
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			goto scan;
		default:
			return -ret; /* other io error */
		}
	}

	/* sense last page */
	ppa.g.pg = geo->num_pg - 1;
	ppa.g.pl = geo->num_pln - 1;

	ret = nvm_bb_chunk_sense(dev, ppa);
	if (ret < 0) /* io error */
		return ret;
	else if (ret == 0) { /* Chunk fully written */
		meta->state = NVM_CHK_ST_CLOSED;
		meta->wp = geo->clba;
		return 0;
	} else if (ret > 0) {
		switch (ret) {
		case NVM_RSP_ERR_EMPTYPAGE:
		case NVM_RSP_ERR_FAILCRC:
		case NVM_RSP_ERR_FAILECC:
		case NVM_RSP_WARN_HIGHECC:
			meta->state = NVM_CHK_ST_OPEN;
			break;
		default:
			return -ret; /* other io error */
		}
	}

scan:
	/*
	 * chunk is open, we scan sequentially to update the write pointer.
	 * We make the assumption that targets write data across all planes
	 * before moving to the next page.
	 */
	for (pg = 0; pg < geo->num_pg; pg++) {
		for (pl = 0; pl < geo->num_pln; pl++) {
			ppa.g.pg = pg;
			ppa.g.pl = pl;

			ret = nvm_bb_chunk_sense(dev, ppa);
			if (ret < 0) /* io error */
				return ret;
			else if (ret == 0) {
				meta->wp += geo->ws_min;
			} else if (ret > 0) {
				switch (ret) {
				case NVM_RSP_ERR_EMPTYPAGE:
					return 0;
				case NVM_RSP_ERR_FAILCRC:
				case NVM_RSP_ERR_FAILECC:
				case NVM_RSP_WARN_HIGHECC:
					meta->wp += geo->ws_min;
					break;
				default:
					return -ret; /* other io error */
				}
			}
		}
	}

	return 0;
}
/*
 * Folds a bad block list from its plane representation to its
 * chunk representation.
 *
 * If the status of any of the planes is bad or grown bad, the chunk is
 * marked offline. If not bad, the first plane state acts as the chunk
 * state.
 */
static int nvm_bb_to_chunk(struct nvm_dev *dev, struct ppa_addr ppa,
			   u8 *blks, int nr_blks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	int ret, blk, pl, offset, blktype;

	for (blk = 0; blk < geo->num_chk; blk++) {
		offset = blk * geo->pln_mode;
		blktype = blks[offset];

		for (pl = 0; pl < geo->pln_mode; pl++) {
			if (blks[offset + pl] &
					(NVM_BLK_T_BAD|NVM_BLK_T_GRWN_BAD)) {
				blktype = blks[offset + pl];
				break;
			}
		}

		ppa.g.blk = blk;

		meta->wp = 0;
		meta->type = NVM_CHK_TP_W_SEQ;
		meta->wi = 0;
		meta->slba = generic_to_dev_addr(dev, ppa).ppa;
		meta->cnlb = dev->geo.clba;

		if (blktype == NVM_BLK_T_FREE) {
			ret = nvm_bb_chunk_scan(dev, ppa, meta);
			if (ret)
				return ret;
		} else {
			meta->state = NVM_CHK_ST_OFFLINE;
		}

		meta++;
	}

	return 0;
}
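
/*
 * Emulate a 2.0 chunk report on a 1.2 device: walk the requested LUNs,
 * fetch each bad block table through ->get_bb_tbl() and fold it into
 * chunk metadata entries.
 */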
static int nvm_get_bb_meta(struct nvm_dev *dev, sector_t slba,
			   int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int ch, lun, nr_blks;
	int ret = 0;

	ppa.ppa = slba;
	ppa = dev_to_generic_addr(dev, ppa);

	if (ppa.g.blk != 0)
		return -EINVAL;

	if ((nchks % geo->num_chk) != 0)
		return -EINVAL;

	nr_blks = geo->num_chk * geo->pln_mode;

	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	for (ch = ppa.g.ch; ch < geo->num_ch; ch++) {
		for (lun = ppa.g.lun; lun < geo->num_lun; lun++) {
			struct ppa_addr ppa_gen, ppa_dev;

			if (!nchks)
				goto done;

			ppa_gen.ppa = 0;
			ppa_gen.g.ch = ch;
			ppa_gen.g.lun = lun;
			ppa_dev = generic_to_dev_addr(dev, ppa_gen);

			ret = dev->ops->get_bb_tbl(dev, ppa_dev, blks);
			if (ret)
				goto done;

			ret = nvm_bb_to_chunk(dev, ppa_gen, blks, nr_blks,
									meta);
			if (ret)
				goto done;

			meta += geo->num_chk;
			nchks -= geo->num_chk;
		}
	}
done:
	kfree(blks);
	return ret;
}

int nvm_get_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr ppa,
		       int nchks, struct nvm_chk_meta *meta)
{
	struct nvm_dev *dev = tgt_dev->parent;

	nvm_ppa_tgt_to_dev(tgt_dev, &ppa, 1);

	if (dev->geo.version == NVM_OCSSD_SPEC_12)
		return nvm_get_bb_meta(dev, (sector_t)ppa.ppa, nchks, meta);

	return dev->ops->get_chk_meta(dev, (sector_t)ppa.ppa, nchks, meta);
}
EXPORT_SYMBOL_GPL(nvm_get_chunk_meta);

int nvm_set_chunk_meta(struct nvm_tgt_dev *tgt_dev, struct ppa_addr *ppas,
		       int nr_ppas, int type)
{
	struct nvm_dev *dev = tgt_dev->parent;
	struct nvm_rq rqd;
	int ret;

	if (dev->geo.version == NVM_OCSSD_SPEC_20)
		return 0;

	if (nr_ppas > NVM_MAX_VLBA) {
		pr_err("unable to update all blocks atomically\n");
		return -EINVAL;
	}

	memset(&rqd, 0, sizeof(struct nvm_rq));

	nvm_set_rqd_ppalist(tgt_dev, &rqd, ppas, nr_ppas);
	nvm_rq_tgt_to_dev(tgt_dev, &rqd);

	ret = dev->ops->set_bb_tbl(dev, &rqd.ppa_addr, rqd.nr_ppas, type);
	nvm_free_rqd_ppalist(tgt_dev, &rqd);
	if (ret)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(nvm_set_chunk_meta);
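
/*
 * Per-device initialization: allocate the LUN reservation bitmap,
 * set up the target list and locks, and register the reverse map.
 */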
static int nvm_core_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret;

	dev->lun_map = kcalloc(BITS_TO_LONGS(geo->all_luns),
				sizeof(unsigned long), GFP_KERNEL);
	if (!dev->lun_map)
		return -ENOMEM;

	INIT_LIST_HEAD(&dev->area_list);
	INIT_LIST_HEAD(&dev->targets);
	mutex_init(&dev->mlock);
	spin_lock_init(&dev->lock);

	ret = nvm_register_map(dev);
	if (ret)
		goto err_fmtype;

	return 0;
err_fmtype:
	kfree(dev->lun_map);
	return ret;
}

static void nvm_free(struct kref *ref)
{
	struct nvm_dev *dev = container_of(ref, struct nvm_dev, ref);

	if (dev->dma_pool)
		dev->ops->destroy_dma_pool(dev->dma_pool);

	if (dev->rmap)
		nvm_unregister_map(dev);

	kfree(dev->lun_map);
	kfree(dev);
}

static int nvm_init(struct nvm_dev *dev)
{
	struct nvm_geo *geo = &dev->geo;
	int ret = -EINVAL;

	if (dev->ops->identity(dev)) {
		pr_err("device could not be identified\n");
		goto err;
	}

	pr_debug("ver:%u.%u nvm_vendor:%x\n", geo->major_ver_id,
			geo->minor_ver_id, geo->vmnt);

	ret = nvm_core_init(dev);
	if (ret) {
		pr_err("could not initialize core structures.\n");
		goto err;
	}

	pr_info("registered %s [%u/%u/%u/%u/%u]\n",
			dev->name, dev->geo.ws_min, dev->geo.ws_opt,
			dev->geo.num_chk, dev->geo.all_luns,
			dev->geo.num_ch);
	return 0;
err:
	pr_err("failed to initialize nvm\n");
	return ret;
}

struct nvm_dev *nvm_alloc_dev(int node)
{
	struct nvm_dev *dev;

	dev = kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node);
	if (dev)
		kref_init(&dev->ref);

	return dev;
}
EXPORT_SYMBOL(nvm_alloc_dev);
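
/*
 * Register a device with the subsystem: identify it and initialize
 * the core structures, create the DMA pool used for PPA lists and
 * per-sector metadata, and add it to the global device list.
 */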
int nvm_register(struct nvm_dev *dev)
{
	int ret, exp_pool_size;

	if (!dev->q || !dev->ops) {
		kref_put(&dev->ref, nvm_free);
		return -EINVAL;
	}

	ret = nvm_init(dev);
	if (ret) {
		kref_put(&dev->ref, nvm_free);
		return ret;
	}

	exp_pool_size = max_t(int, PAGE_SIZE,
			      (NVM_MAX_VLBA * (sizeof(u64) + dev->geo.sos)));
	exp_pool_size = round_up(exp_pool_size, PAGE_SIZE);

	dev->dma_pool = dev->ops->create_dma_pool(dev, "ppalist",
						  exp_pool_size);
	if (!dev->dma_pool) {
		pr_err("could not create dma pool\n");
		kref_put(&dev->ref, nvm_free);
		return -ENOMEM;
	}

	/* register device with a supported media manager */
	down_write(&nvm_lock);
	list_add(&dev->devices, &nvm_devices);
	up_write(&nvm_lock);

	return 0;
}
EXPORT_SYMBOL(nvm_register);

void nvm_unregister(struct nvm_dev *dev)
{
	struct nvm_target *t, *tmp;

	mutex_lock(&dev->mlock);
	list_for_each_entry_safe(t, tmp, &dev->targets, list) {
		if (t->dev->parent != dev)
			continue;
		__nvm_remove_target(t, false);
		kref_put(&dev->ref, nvm_free);
	}
	mutex_unlock(&dev->mlock);

	down_write(&nvm_lock);
	list_del(&dev->devices);
	up_write(&nvm_lock);

	kref_put(&dev->ref, nvm_free);
}
EXPORT_SYMBOL(nvm_unregister);
static int __nvm_configure_create(struct nvm_ioctl_create *create)
{
	struct nvm_dev *dev;
	int ret;

	down_write(&nvm_lock);
	dev = nvm_find_nvm_dev(create->dev);
	up_write(&nvm_lock);

	if (!dev) {
		pr_err("device not found\n");
		return -EINVAL;
	}

	kref_get(&dev->ref);
	ret = nvm_create_tgt(dev, create);
	if (ret)
		kref_put(&dev->ref, nvm_free);

	return ret;
}

static long nvm_ioctl_info(struct file *file, void __user *arg)
{
	struct nvm_ioctl_info *info;
	struct nvm_tgt_type *tt;
	int tgt_iter = 0;

	info = memdup_user(arg, sizeof(struct nvm_ioctl_info));
	if (IS_ERR(info))
		return -EFAULT;

	info->version[0] = NVM_VERSION_MAJOR;
	info->version[1] = NVM_VERSION_MINOR;
	info->version[2] = NVM_VERSION_PATCH;

	down_write(&nvm_tgtt_lock);
	list_for_each_entry(tt, &nvm_tgt_types, list) {
		struct nvm_ioctl_info_tgt *tgt = &info->tgts[tgt_iter];

		tgt->version[0] = tt->version[0];
		tgt->version[1] = tt->version[1];
		tgt->version[2] = tt->version[2];
		strncpy(tgt->tgtname, tt->name, NVM_TTYPE_NAME_MAX);

		tgt_iter++;
	}

	info->tgtsize = tgt_iter;
	up_write(&nvm_tgtt_lock);

	if (copy_to_user(arg, info, sizeof(struct nvm_ioctl_info))) {
		kfree(info);
		return -EFAULT;
	}

	kfree(info);
	return 0;
}
static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
{
	struct nvm_ioctl_get_devices *devices;
	struct nvm_dev *dev;
	int i = 0;

	devices = kzalloc(sizeof(struct nvm_ioctl_get_devices), GFP_KERNEL);
	if (!devices)
		return -ENOMEM;

	down_write(&nvm_lock);
	list_for_each_entry(dev, &nvm_devices, devices) {
		struct nvm_ioctl_device_info *info = &devices->info[i];

		strlcpy(info->devname, dev->name, sizeof(info->devname));

		/* kept for compatibility */
		info->bmversion[0] = 1;
		info->bmversion[1] = 0;
		info->bmversion[2] = 0;
		strlcpy(info->bmname, "gennvm", sizeof(info->bmname));
		i++;

		if (i >= ARRAY_SIZE(devices->info)) {
			pr_err("max %zd devices can be reported.\n",
			       ARRAY_SIZE(devices->info));
			break;
		}
	}
	up_write(&nvm_lock);

	devices->nr_devices = i;

	if (copy_to_user(arg, devices,
			 sizeof(struct nvm_ioctl_get_devices))) {
		kfree(devices);
		return -EFAULT;
	}

	kfree(devices);
	return 0;
}

static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
{
	struct nvm_ioctl_create create;

	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
		return -EFAULT;

	if (create.conf.type == NVM_CONFIG_TYPE_EXTENDED &&
	    create.conf.e.rsv != 0) {
		pr_err("reserved config field in use\n");
		return -EINVAL;
	}

	create.dev[DISK_NAME_LEN - 1] = '\0';
	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
	create.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (create.flags != 0) {
		__u32 flags = create.flags;

		/* Check for valid flags */
		if (flags & NVM_TARGET_FACTORY)
			flags &= ~NVM_TARGET_FACTORY;

		if (flags) {
			pr_err("flag not supported\n");
			return -EINVAL;
		}
	}

	return __nvm_configure_create(&create);
}

static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
{
	struct nvm_ioctl_remove remove;

	if (copy_from_user(&remove, arg, sizeof(struct nvm_ioctl_remove)))
		return -EFAULT;

	remove.tgtname[DISK_NAME_LEN - 1] = '\0';

	if (remove.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return nvm_remove_tgt(&remove);
}
/* kept for compatibility reasons */
static long nvm_ioctl_dev_init(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_init init;

	if (copy_from_user(&init, arg, sizeof(struct nvm_ioctl_dev_init)))
		return -EFAULT;

	if (init.flags != 0) {
		pr_err("no flags supported\n");
		return -EINVAL;
	}

	return 0;
}

/* Kept for compatibility reasons */
static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
{
	struct nvm_ioctl_dev_factory fact;

	if (copy_from_user(&fact, arg, sizeof(struct nvm_ioctl_dev_factory)))
		return -EFAULT;

	fact.dev[DISK_NAME_LEN - 1] = '\0';

	if (fact.flags & ~(NVM_FACTORY_NR_BITS - 1))
		return -EINVAL;

	return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case NVM_INFO:
		return nvm_ioctl_info(file, argp);
	case NVM_GET_DEVICES:
		return nvm_ioctl_get_devices(file, argp);
	case NVM_DEV_CREATE:
		return nvm_ioctl_dev_create(file, argp);
	case NVM_DEV_REMOVE:
		return nvm_ioctl_dev_remove(file, argp);
	case NVM_DEV_INIT:
		return nvm_ioctl_dev_init(file, argp);
	case NVM_DEV_FACTORY:
		return nvm_ioctl_dev_factory(file, argp);
	}
	return 0;
}

static const struct file_operations _ctl_fops = {
	.open = nonseekable_open,
	.unlocked_ioctl = nvm_ctl_ioctl,
	.owner = THIS_MODULE,
	.llseek  = noop_llseek,
};

static struct miscdevice _nvm_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "lightnvm",
	.nodename	= "lightnvm/control",
	.fops		= &_ctl_fops,
};
builtin_misc_device(_nvm_misc);