nand.h 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. /*
  3. * Copyright 2017 - Free Electrons
  4. *
  5. * Authors:
  6. * Boris Brezillon <boris.brezillon@free-electrons.com>
  7. * Peter Pan <peterpandong@micron.com>
  8. */
  9. #ifndef __LINUX_MTD_NAND_H
  10. #define __LINUX_MTD_NAND_H
  11. #include <linux/mtd/mtd.h>
/**
 * struct nand_memory_organization - Memory organization structure
 * @bits_per_cell: number of bits per NAND cell (1 = SLC, 2 = MLC, ...)
 * @pagesize: page size, in bytes
 * @oobsize: OOB (out-of-band/spare) area size, in bytes
 * @pages_per_eraseblock: number of pages per eraseblock
 * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
 * @planes_per_lun: number of planes per LUN
 * @luns_per_target: number of LUNs per target (target is a synonym for die)
 * @ntargets: total number of targets/dies exposed by the NAND device
 */
struct nand_memory_organization {
	unsigned int bits_per_cell;
	unsigned int pagesize;
	unsigned int oobsize;
	unsigned int pages_per_eraseblock;
	unsigned int eraseblocks_per_lun;
	unsigned int planes_per_lun;
	unsigned int luns_per_target;
	unsigned int ntargets;
};

/*
 * NAND_MEMORG() - Shorthand designated initializer for
 * struct nand_memory_organization. Arguments follow the field order above.
 */
#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt)	\
	{							\
		.bits_per_cell = (bpc),				\
		.pagesize = (ps),				\
		.oobsize = (os),				\
		.pages_per_eraseblock = (ppe),			\
		.eraseblocks_per_lun = (epl),			\
		.planes_per_lun = (ppl),			\
		.luns_per_target = (lpt),			\
		.ntargets = (nt),				\
	}
/**
 * struct nand_row_converter - Information needed to convert an absolute offset
 *			       into a row address
 * @lun_addr_shift: position (bit shift) of the LUN identifier in the row
 *		    address
 * @eraseblock_addr_shift: position (bit shift) of the eraseblock identifier
 *			   in the row address
 */
struct nand_row_converter {
	unsigned int lun_addr_shift;
	unsigned int eraseblock_addr_shift;
};
/**
 * struct nand_pos - NAND position object
 * @target: the NAND target/die
 * @lun: the LUN identifier
 * @plane: the plane within the LUN (derived from @eraseblock, see
 *	   nanddev_offs_to_pos())
 * @eraseblock: the eraseblock within the LUN
 * @page: the page within the eraseblock
 *
 * These information are usually used by specific sub-layers to select the
 * appropriate target/die and generate a row address to pass to the device.
 */
struct nand_pos {
	unsigned int target;
	unsigned int lun;
	unsigned int plane;
	unsigned int eraseblock;
	unsigned int page;
};
/**
 * struct nand_page_io_req - NAND I/O request object
 * @pos: the position this I/O request is targeting
 * @dataoffs: the data offset within the page
 * @datalen: number of data bytes to read from/write to this page
 * @databuf: buffer to store data in or get data from (@out for writes,
 *	     @in for reads)
 * @ooboffs: the OOB offset within the page
 * @ooblen: the number of OOB bytes to read from/write to this page
 * @oobbuf: buffer to store OOB data in or get OOB data from (@out for
 *	    writes, @in for reads)
 * @mode: one of the %MTD_OPS_XXX modes
 *
 * This object is used to pass per-page I/O requests to NAND sub-layers. This
 * way all useful information are already formatted in a useful way and
 * specific NAND layers can focus on translating these information into
 * specific commands/operations.
 */
struct nand_page_io_req {
	struct nand_pos pos;
	unsigned int dataoffs;
	unsigned int datalen;
	union {
		const void *out;
		void *in;
	} databuf;
	unsigned int ooboffs;
	unsigned int ooblen;
	union {
		const void *out;
		void *in;
	} oobbuf;
	int mode;
};
/**
 * struct nand_ecc_req - NAND ECC requirements
 * @strength: ECC strength (number of correctable bits per step)
 * @step_size: ECC step/block size, in bytes
 */
struct nand_ecc_req {
	unsigned int strength;
	unsigned int step_size;
};

/* Shorthand designated initializer for struct nand_ecc_req. */
#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
/**
 * struct nand_bbt - bad block table object
 * @cache: in memory BBT cache, one entry per eraseblock; NULL until
 *	   nanddev_bbt_init() has run (see nanddev_bbt_is_initialized())
 */
struct nand_bbt {
	unsigned long *cache;
};

/* Forward declaration: needed by the struct nand_ops hooks below. */
struct nand_device;
/**
 * struct nand_ops - NAND operations
 * @erase: erase a specific block. No need to check if the block is bad before
 *	   erasing, this has been taken care of by the generic NAND layer
 * @markbad: mark a specific block bad. No need to check if the block is
 *	     already marked bad, this has been taken care of by the generic
 *	     NAND layer. This method should just write the BBM (Bad Block
 *	     Marker) so that future call to struct_nand_ops->isbad() return
 *	     true
 * @isbad: check whether a block is bad or not. This method should just read
 *	   the BBM and return whether the block is bad or not based on what it
 *	   reads
 *
 * These are all low level operations that should be implemented by specialized
 * NAND layers (SPI NAND, raw NAND, ...).
 */
struct nand_ops {
	int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
	int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
	bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
};
/**
 * struct nand_device - NAND device
 * @mtd: MTD instance attached to the NAND device
 * @memorg: memory layout
 * @eccreq: ECC requirements
 * @rowconv: position to row address converter
 * @bbt: bad block table info
 * @ops: NAND operations attached to the NAND device
 *
 * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
 * should declare their own NAND object embedding a nand_device struct (that's
 * how inheritance is done).
 * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
 * at device detection time to reflect the NAND device
 * capabilities/requirements. Once this is done nanddev_init() can be called.
 * It will take care of converting NAND information into MTD ones, which means
 * the specialized NAND layers should never manually tweak
 * struct_nand_device->mtd except for the ->_read/write() hooks.
 */
struct nand_device {
	struct mtd_info mtd;
	struct nand_memory_organization memorg;
	struct nand_ecc_req eccreq;
	struct nand_row_converter rowconv;
	struct nand_bbt bbt;
	const struct nand_ops *ops;
};
/**
 * struct nand_io_iter - NAND I/O iterator
 * @req: current per-page I/O request
 * @oobbytes_per_page: maximum number of OOB bytes per page
 * @dataleft: remaining number of data bytes to read/write
 * @oobleft: remaining number of OOB bytes to read/write
 *
 * Can be used by specialized NAND layers to iterate over all pages covered
 * by an MTD I/O request, which should greatly simplifies the boiler-plate
 * code needed to read/write data from/to a NAND device.
 */
struct nand_io_iter {
	struct nand_page_io_req req;
	unsigned int oobbytes_per_page;
	unsigned int dataleft;
	unsigned int oobleft;
};
  188. /**
  189. * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
  190. * @mtd: MTD instance
  191. *
  192. * Return: the NAND device embedding @mtd.
  193. */
  194. static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
  195. {
  196. return container_of(mtd, struct nand_device, mtd);
  197. }
  198. /**
  199. * nanddev_to_mtd() - Get the MTD device attached to a NAND device
  200. * @nand: NAND device
  201. *
  202. * Return: the MTD device embedded in @nand.
  203. */
  204. static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
  205. {
  206. return &nand->mtd;
  207. }
/**
 * nanddev_bits_per_cell() - Get the number of bits per cell
 * @nand: NAND device
 *
 * Return: the number of bits per cell.
 */
static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
{
	return nand->memorg.bits_per_cell;
}
  218. /**
  219. * nanddev_page_size() - Get NAND page size
  220. * @nand: NAND device
  221. *
  222. * Return: the page size.
  223. */
  224. static inline size_t nanddev_page_size(const struct nand_device *nand)
  225. {
  226. return nand->memorg.pagesize;
  227. }
  228. /**
  229. * nanddev_per_page_oobsize() - Get NAND OOB size
  230. * @nand: NAND device
  231. *
  232. * Return: the OOB size.
  233. */
  234. static inline unsigned int
  235. nanddev_per_page_oobsize(const struct nand_device *nand)
  236. {
  237. return nand->memorg.oobsize;
  238. }
  239. /**
  240. * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
  241. * @nand: NAND device
  242. *
  243. * Return: the number of pages per eraseblock.
  244. */
  245. static inline unsigned int
  246. nanddev_pages_per_eraseblock(const struct nand_device *nand)
  247. {
  248. return nand->memorg.pages_per_eraseblock;
  249. }
/**
 * nanddev_eraseblock_size() - Get NAND erase block size
 * @nand: NAND device
 *
 * Return: the eraseblock size, in bytes.
 */
static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
{
	return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
}
  260. /**
  261. * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
  262. * @nand: NAND device
  263. *
  264. * Return: the number of eraseblocks per LUN.
  265. */
  266. static inline unsigned int
  267. nanddev_eraseblocks_per_lun(const struct nand_device *nand)
  268. {
  269. return nand->memorg.eraseblocks_per_lun;
  270. }
  271. /**
  272. * nanddev_target_size() - Get the total size provided by a single target/die
  273. * @nand: NAND device
  274. *
  275. * Return: the total size exposed by a single target/die in bytes.
  276. */
  277. static inline u64 nanddev_target_size(const struct nand_device *nand)
  278. {
  279. return (u64)nand->memorg.luns_per_target *
  280. nand->memorg.eraseblocks_per_lun *
  281. nand->memorg.pages_per_eraseblock *
  282. nand->memorg.pagesize;
  283. }
/**
 * nanddev_ntargets() - Get the total of targets
 * @nand: NAND device
 *
 * Return: the number of targets/dies exposed by @nand.
 */
static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
{
	return nand->memorg.ntargets;
}
/**
 * nanddev_neraseblocks() - Get the total number of eraseblocks
 * @nand: NAND device
 *
 * Return: the total number of eraseblocks exposed by @nand.
 */
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
	return nand->memorg.ntargets * nand->memorg.luns_per_target *
	       nand->memorg.eraseblocks_per_lun;
}
  305. /**
  306. * nanddev_size() - Get NAND size
  307. * @nand: NAND device
  308. *
  309. * Return: the total size (in bytes) exposed by @nand.
  310. */
  311. static inline u64 nanddev_size(const struct nand_device *nand)
  312. {
  313. return nanddev_target_size(nand) * nanddev_ntargets(nand);
  314. }
  315. /**
  316. * nanddev_get_memorg() - Extract memory organization info from a NAND device
  317. * @nand: NAND device
  318. *
  319. * This can be used by the upper layer to fill the memorg info before calling
  320. * nanddev_init().
  321. *
  322. * Return: the memorg object embedded in the NAND device.
  323. */
  324. static inline struct nand_memory_organization *
  325. nanddev_get_memorg(struct nand_device *nand)
  326. {
  327. return &nand->memorg;
  328. }
  329. int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
  330. struct module *owner);
  331. void nanddev_cleanup(struct nand_device *nand);
  332. /**
  333. * nanddev_register() - Register a NAND device
  334. * @nand: NAND device
  335. *
  336. * Register a NAND device.
  337. * This function is just a wrapper around mtd_device_register()
  338. * registering the MTD device embedded in @nand.
  339. *
  340. * Return: 0 in case of success, a negative error code otherwise.
  341. */
  342. static inline int nanddev_register(struct nand_device *nand)
  343. {
  344. return mtd_device_register(&nand->mtd, NULL, 0);
  345. }
  346. /**
  347. * nanddev_unregister() - Unregister a NAND device
  348. * @nand: NAND device
  349. *
  350. * Unregister a NAND device.
  351. * This function is just a wrapper around mtd_device_unregister()
  352. * unregistering the MTD device embedded in @nand.
  353. *
  354. * Return: 0 in case of success, a negative error code otherwise.
  355. */
  356. static inline int nanddev_unregister(struct nand_device *nand)
  357. {
  358. return mtd_device_unregister(&nand->mtd);
  359. }
  360. /**
  361. * nanddev_set_of_node() - Attach a DT node to a NAND device
  362. * @nand: NAND device
  363. * @np: DT node
  364. *
  365. * Attach a DT node to a NAND device.
  366. */
  367. static inline void nanddev_set_of_node(struct nand_device *nand,
  368. struct device_node *np)
  369. {
  370. mtd_set_of_node(&nand->mtd, np);
  371. }
  372. /**
  373. * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device
  374. * @nand: NAND device
  375. *
  376. * Return: the DT node attached to @nand.
  377. */
  378. static inline struct device_node *nanddev_get_of_node(struct nand_device *nand)
  379. {
  380. return mtd_get_of_node(&nand->mtd);
  381. }
/**
 * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
 * @nand: NAND device
 * @offs: absolute NAND offset (usually passed by the MTD layer)
 * @pos: a NAND position object to fill in
 *
 * Converts @offs into a nand_pos representation by peeling off, in order,
 * the page offset, the page, the eraseblock, the LUN and finally the
 * target. @pos->plane is derived from the eraseblock index, not stored
 * separately in the offset.
 *
 * Return: the offset within the NAND page pointed by @pos.
 */
static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
					       loff_t offs,
					       struct nand_pos *pos)
{
	unsigned int pageoffs;
	u64 tmp = offs;

	/* do_div() divides tmp in place and returns the remainder. */
	pageoffs = do_div(tmp, nand->memorg.pagesize);
	pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
	pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
	/* Eraseblocks are interleaved across planes within a LUN. */
	pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
	pos->lun = do_div(tmp, nand->memorg.luns_per_target);
	pos->target = tmp;

	return pageoffs;
}
  406. /**
  407. * nanddev_pos_cmp() - Compare two NAND positions
  408. * @a: First NAND position
  409. * @b: Second NAND position
  410. *
  411. * Compares two NAND positions.
  412. *
  413. * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
  414. */
  415. static inline int nanddev_pos_cmp(const struct nand_pos *a,
  416. const struct nand_pos *b)
  417. {
  418. if (a->target != b->target)
  419. return a->target < b->target ? -1 : 1;
  420. if (a->lun != b->lun)
  421. return a->lun < b->lun ? -1 : 1;
  422. if (a->eraseblock != b->eraseblock)
  423. return a->eraseblock < b->eraseblock ? -1 : 1;
  424. if (a->page != b->page)
  425. return a->page < b->page ? -1 : 1;
  426. return 0;
  427. }
/**
 * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
 * @nand: NAND device
 * @pos: the NAND position to convert
 *
 * Converts @pos NAND position into an absolute offset.
 *
 * Return: the absolute offset. Note that @pos points to the beginning of a
 *	   page, if one wants to point to a specific offset within this page
 *	   the returned offset has to be adjusted manually.
 */
static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
					 const struct nand_pos *pos)
{
	unsigned int npages;

	/*
	 * Linearize (target, lun, eraseblock, page) into a global page
	 * index: innermost term is the page, outermost the target.
	 */
	npages = pos->page +
		 ((pos->eraseblock +
		   (pos->lun +
		    (pos->target * nand->memorg.luns_per_target)) *
		   nand->memorg.eraseblocks_per_lun) *
		  nand->memorg.pages_per_eraseblock);

	/* Cast before multiplying to avoid 32-bit overflow. */
	return (loff_t)npages * nand->memorg.pagesize;
}
  451. /**
  452. * nanddev_pos_to_row() - Extract a row address from a NAND position
  453. * @nand: NAND device
  454. * @pos: the position to convert
  455. *
  456. * Converts a NAND position into a row address that can then be passed to the
  457. * device.
  458. *
  459. * Return: the row address extracted from @pos.
  460. */
  461. static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
  462. const struct nand_pos *pos)
  463. {
  464. return (pos->lun << nand->rowconv.lun_addr_shift) |
  465. (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
  466. pos->page;
  467. }
  468. /**
  469. * nanddev_pos_next_target() - Move a position to the next target/die
  470. * @nand: NAND device
  471. * @pos: the position to update
  472. *
  473. * Updates @pos to point to the start of the next target/die. Useful when you
  474. * want to iterate over all targets/dies of a NAND device.
  475. */
  476. static inline void nanddev_pos_next_target(struct nand_device *nand,
  477. struct nand_pos *pos)
  478. {
  479. pos->page = 0;
  480. pos->plane = 0;
  481. pos->eraseblock = 0;
  482. pos->lun = 0;
  483. pos->target++;
  484. }
  485. /**
  486. * nanddev_pos_next_lun() - Move a position to the next LUN
  487. * @nand: NAND device
  488. * @pos: the position to update
  489. *
  490. * Updates @pos to point to the start of the next LUN. Useful when you want to
  491. * iterate over all LUNs of a NAND device.
  492. */
  493. static inline void nanddev_pos_next_lun(struct nand_device *nand,
  494. struct nand_pos *pos)
  495. {
  496. if (pos->lun >= nand->memorg.luns_per_target - 1)
  497. return nanddev_pos_next_target(nand, pos);
  498. pos->lun++;
  499. pos->page = 0;
  500. pos->plane = 0;
  501. pos->eraseblock = 0;
  502. }
  503. /**
  504. * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
  505. * @nand: NAND device
  506. * @pos: the position to update
  507. *
  508. * Updates @pos to point to the start of the next eraseblock. Useful when you
  509. * want to iterate over all eraseblocks of a NAND device.
  510. */
  511. static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
  512. struct nand_pos *pos)
  513. {
  514. if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
  515. return nanddev_pos_next_lun(nand, pos);
  516. pos->eraseblock++;
  517. pos->page = 0;
  518. pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
  519. }
  520. /**
  521. * nanddev_pos_next_page() - Move a position to the next page
  522. * @nand: NAND device
  523. * @pos: the position to update
  524. *
  525. * Updates @pos to point to the start of the next page. Useful when you want to
  526. * iterate over all pages of a NAND device.
  527. */
  528. static inline void nanddev_pos_next_page(struct nand_device *nand,
  529. struct nand_pos *pos)
  530. {
  531. if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
  532. return nanddev_pos_next_eraseblock(nand, pos);
  533. pos->page++;
  534. }
/**
 * nanddev_io_iter_init - Initialize a NAND I/O iterator
 * @nand: NAND device
 * @offs: absolute offset
 * @req: MTD request
 * @iter: NAND I/O iterator
 *
 * Initializes a NAND iterator based on the information passed by the MTD
 * layer. The first per-page request is clamped so it never crosses a page
 * (data) or the per-page OOB budget (OOB).
 */
static inline void nanddev_io_iter_init(struct nand_device *nand,
					loff_t offs, struct mtd_oob_ops *req,
					struct nand_io_iter *iter)
{
	struct mtd_info *mtd = nanddev_to_mtd(nand);

	iter->req.mode = req->mode;
	/* Fills iter->req.pos and returns the offset within the page. */
	iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
	iter->req.ooboffs = req->ooboffs;
	iter->oobbytes_per_page = mtd_oobavail(mtd, req);
	iter->dataleft = req->len;
	iter->oobleft = req->ooblen;
	iter->req.databuf.in = req->datbuf;
	/* First chunk: from dataoffs to the end of the page, at most. */
	iter->req.datalen = min_t(unsigned int,
				  nand->memorg.pagesize - iter->req.dataoffs,
				  iter->dataleft);
	iter->req.oobbuf.in = req->oobbuf;
	iter->req.ooblen = min_t(unsigned int,
				 iter->oobbytes_per_page - iter->req.ooboffs,
				 iter->oobleft);
}
/**
 * nanddev_io_iter_next_page - Move to the next page
 * @nand: NAND device
 * @iter: NAND I/O iterator
 *
 * Updates the @iter to point to the next page. The consumed byte counts and
 * buffer pointers are advanced by the amounts of the request that just
 * completed before the next chunk sizes are computed.
 */
static inline void nanddev_io_iter_next_page(struct nand_device *nand,
					     struct nand_io_iter *iter)
{
	nanddev_pos_next_page(nand, &iter->req.pos);
	/* Account for the chunk that was just processed. */
	iter->dataleft -= iter->req.datalen;
	iter->req.databuf.in += iter->req.datalen;
	iter->oobleft -= iter->req.ooblen;
	iter->req.oobbuf.in += iter->req.ooblen;
	/* Only the first page of the request can have non-zero offsets. */
	iter->req.dataoffs = 0;
	iter->req.ooboffs = 0;
	iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
				  iter->dataleft);
	iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
				 iter->oobleft);
}
  587. /**
  588. * nand_io_iter_end - Should end iteration or not
  589. * @nand: NAND device
  590. * @iter: NAND I/O iterator
  591. *
  592. * Check whether @iter has reached the end of the NAND portion it was asked to
  593. * iterate on or not.
  594. *
  595. * Return: true if @iter has reached the end of the iteration request, false
  596. * otherwise.
  597. */
  598. static inline bool nanddev_io_iter_end(struct nand_device *nand,
  599. const struct nand_io_iter *iter)
  600. {
  601. if (iter->dataleft || iter->oobleft)
  602. return false;
  603. return true;
  604. }
/**
 * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
 *			      I/O request
 * @nand: NAND device
 * @start: start address to read/write from
 * @req: MTD I/O request
 * @iter: NAND I/O iterator
 *
 * Should be used for iterate over pages that are contained in an MTD request.
 */
#define nanddev_io_for_each_page(nand, start, req, iter)	\
	for (nanddev_io_iter_init(nand, start, req, iter);	\
	     !nanddev_io_iter_end(nand, iter);			\
	     nanddev_io_iter_next_page(nand, iter))
  619. bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
  620. bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
  621. int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
  622. int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
  623. /* BBT related functions */
/**
 * enum nand_bbt_block_status - Possible states of a BBT entry
 * @NAND_BBT_BLOCK_STATUS_UNKNOWN: block state has not been queried yet
 * @NAND_BBT_BLOCK_GOOD: block is known good
 * @NAND_BBT_BLOCK_WORN: block went bad during use
 * @NAND_BBT_BLOCK_RESERVED: block is reserved
 * @NAND_BBT_BLOCK_FACTORY_BAD: block was marked bad at the factory
 * @NAND_BBT_BLOCK_NUM_STATUS: number of states (not a real state)
 */
enum nand_bbt_block_status {
	NAND_BBT_BLOCK_STATUS_UNKNOWN,
	NAND_BBT_BLOCK_GOOD,
	NAND_BBT_BLOCK_WORN,
	NAND_BBT_BLOCK_RESERVED,
	NAND_BBT_BLOCK_FACTORY_BAD,
	NAND_BBT_BLOCK_NUM_STATUS,
};
  632. int nanddev_bbt_init(struct nand_device *nand);
  633. void nanddev_bbt_cleanup(struct nand_device *nand);
  634. int nanddev_bbt_update(struct nand_device *nand);
  635. int nanddev_bbt_get_block_status(const struct nand_device *nand,
  636. unsigned int entry);
  637. int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
  638. enum nand_bbt_block_status status);
  639. int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
  640. /**
  641. * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
  642. * @nand: NAND device
  643. * @pos: the NAND position we want to get BBT entry for
  644. *
  645. * Return the BBT entry used to store information about the eraseblock pointed
  646. * by @pos.
  647. *
  648. * Return: the BBT entry storing information about eraseblock pointed by @pos.
  649. */
  650. static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
  651. const struct nand_pos *pos)
  652. {
  653. return pos->eraseblock +
  654. ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
  655. nand->memorg.eraseblocks_per_lun);
  656. }
  657. /**
  658. * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
  659. * @nand: NAND device
  660. *
  661. * Return: true if the BBT has been initialized, false otherwise.
  662. */
  663. static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
  664. {
  665. return !!nand->bbt.cache;
  666. }
  667. /* MTD -> NAND helper functions. */
  668. int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
  669. #endif /* __LINUX_MTD_NAND_H */