rfd_ftl.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814
  1. /*
  2. * rfd_ftl.c -- resident flash disk (flash translation layer)
  3. *
  4. * Copyright © 2005 Sean Young <sean@mess.org>
  5. *
  6. * This type of flash translation layer (FTL) is used by the Embedded BIOS
  7. * by General Software. It is known as the Resident Flash Disk (RFD), see:
  8. *
  9. * http://www.gensw.com/pages/prod/bios/rfd.htm
  10. *
  11. * based on ftl.c
  12. */
  13. #include <linux/hdreg.h>
  14. #include <linux/init.h>
  15. #include <linux/mtd/blktrans.h>
  16. #include <linux/mtd/mtd.h>
  17. #include <linux/vmalloc.h>
  18. #include <linux/slab.h>
  19. #include <linux/jiffies.h>
  20. #include <linux/module.h>
  21. #include <asm/types.h>
/* Logical block size; 0 (default) means "use the MTD erase-unit size". */
static int block_size = 0;
module_param(block_size, int, 0);
MODULE_PARM_DESC(block_size, "Block size to use by RFD, defaults to erase unit size");

#define PREFIX "rfd_ftl: "

/* This major has been assigned by device@lanana.org */
#ifndef RFD_FTL_MAJOR
#define RFD_FTL_MAJOR 256
#endif

/* Maximum number of partitions in an FTL region */
#define PART_BITS 4

/* An erase unit should start with this value */
#define RFD_MAGIC 0x9193

/* the second value is 0xffff or 0xffc8; function unknown */
/* the third value is always 0xffff, ignored */

/* next is an array of mapping for each corresponding sector */
#define HEADER_MAP_OFFSET 3

/* Special map-entry values (all others are logical sector numbers). */
#define SECTOR_DELETED 0x0000
#define SECTOR_ZERO 0xfffe
#define SECTOR_FREE 0xffff

#define SECTOR_SIZE 512

#define SECTORS_PER_TRACK 63
/* Per-erase-unit bookkeeping. */
struct block {
	enum {
		BLOCK_OK,	/* valid unit carrying the RFD magic */
		BLOCK_ERASING,	/* erase in flight */
		BLOCK_ERASED,	/* erased, magic not yet written */
		BLOCK_UNUSED,	/* no RFD magic found at scan time */
		BLOCK_FAILED	/* erase or header write failed */
	} state;
	int free_sectors;	/* data sectors still writable */
	int used_sectors;	/* data sectors holding live data */
	int erases;		/* erase count, used for wear levelling */
	u_long offset;		/* byte offset of this unit on the MTD */
};
/*
 * One translated disk.  Embeds the mtd_blktrans_dev so the blktrans
 * callbacks can cast their 'dev' argument straight to struct partition.
 */
struct partition {
	struct mtd_blktrans_dev mbd;

	u_int block_size;		/* size of erase unit */
	u_int total_blocks;		/* number of erase units */
	u_int header_sectors_per_block;	/* header sectors in erase unit */
	u_int data_sectors_per_block;	/* data sectors in erase unit */
	u_int sector_count;		/* sectors in translated disk */
	u_int header_size;		/* bytes in header sector */
	int reserved_block;		/* block next up for reclaim */
	int current_block;		/* block to write to */
	u16 *header_cache;		/* cached header */

	int is_reclaiming;		/* guards against recursive reclaim */
	int cylinders;			/* geometry reported via getgeo */
	int errors;			/* set on scan errors -> read-only */
	u_long *sector_map;		/* logical sector -> flash byte addr, -1 if unmapped */
	struct block *blocks;		/* one entry per erase unit */
};
/* Forward declaration: move_block_contents() relocates sectors through it. */
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf);
  74. static int build_block_map(struct partition *part, int block_no)
  75. {
  76. struct block *block = &part->blocks[block_no];
  77. int i;
  78. block->offset = part->block_size * block_no;
  79. if (le16_to_cpu(part->header_cache[0]) != RFD_MAGIC) {
  80. block->state = BLOCK_UNUSED;
  81. return -ENOENT;
  82. }
  83. block->state = BLOCK_OK;
  84. for (i=0; i<part->data_sectors_per_block; i++) {
  85. u16 entry;
  86. entry = le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i]);
  87. if (entry == SECTOR_DELETED)
  88. continue;
  89. if (entry == SECTOR_FREE) {
  90. block->free_sectors++;
  91. continue;
  92. }
  93. if (entry == SECTOR_ZERO)
  94. entry = 0;
  95. if (entry >= part->sector_count) {
  96. printk(KERN_WARNING PREFIX
  97. "'%s': unit #%d: entry %d corrupt, "
  98. "sector %d out of range\n",
  99. part->mbd.mtd->name, block_no, i, entry);
  100. continue;
  101. }
  102. if (part->sector_map[entry] != -1) {
  103. printk(KERN_WARNING PREFIX
  104. "'%s': more than one entry for sector %d\n",
  105. part->mbd.mtd->name, entry);
  106. part->errors = 1;
  107. continue;
  108. }
  109. part->sector_map[entry] = block->offset +
  110. (i + part->header_sectors_per_block) * SECTOR_SIZE;
  111. block->used_sectors++;
  112. }
  113. if (block->free_sectors == part->data_sectors_per_block)
  114. part->reserved_block = block_no;
  115. return 0;
  116. }
/*
 * Size the translation layer from the MTD geometry, allocate the caches
 * and maps, then read every erase unit's header to rebuild the logical
 * sector map.
 *
 * Returns 0 on success; -ENOENT if the device is too small or no unit
 * carries the RFD magic; -ENOMEM / -EIO on allocation or read failure.
 * On error all allocations made here are released (the caller only
 * frees the partition struct itself).
 */
static int scan_header(struct partition *part)
{
	int sectors_per_block;
	int i, rc = -ENOMEM;
	int blocks_found;
	size_t retlen;

	sectors_per_block = part->block_size / SECTOR_SIZE;
	/* device size was checked <= UINT_MAX in rfd_ftl_add_mtd() */
	part->total_blocks = (u32)part->mbd.mtd->size / part->block_size;

	/* need at least one data unit plus the reclaim spare */
	if (part->total_blocks < 2)
		return -ENOENT;

	/* each erase block has three bytes header, followed by the map */
	part->header_sectors_per_block =
			((HEADER_MAP_OFFSET + sectors_per_block) *
			sizeof(u16) + SECTOR_SIZE - 1) / SECTOR_SIZE;

	part->data_sectors_per_block = sectors_per_block -
			part->header_sectors_per_block;

	part->header_size = (HEADER_MAP_OFFSET +
			part->data_sectors_per_block) * sizeof(u16);

	/* one unit is kept as the reclaim spare, hence total_blocks - 1 */
	part->cylinders = (part->data_sectors_per_block *
			(part->total_blocks - 1) - 1) / SECTORS_PER_TRACK;

	part->sector_count = part->cylinders * SECTORS_PER_TRACK;

	part->current_block = -1;
	part->reserved_block = -1;
	part->is_reclaiming = 0;

	part->header_cache = kmalloc(part->header_size, GFP_KERNEL);
	if (!part->header_cache)
		goto err;

	part->blocks = kcalloc(part->total_blocks, sizeof(struct block),
			GFP_KERNEL);
	if (!part->blocks)
		goto err;

	part->sector_map = vmalloc(array_size(sizeof(u_long),
			part->sector_count));
	if (!part->sector_map) {
		printk(KERN_ERR PREFIX "'%s': unable to allocate memory for "
			"sector map", part->mbd.mtd->name);
		goto err;
	}

	/* -1 == "logical sector not mapped to flash" */
	for (i=0; i<part->sector_count; i++)
		part->sector_map[i] = -1;

	for (i=0, blocks_found=0; i<part->total_blocks; i++) {
		rc = mtd_read(part->mbd.mtd, i * part->block_size,
			part->header_size, &retlen,
			(u_char *)part->header_cache);

		if (!rc && retlen != part->header_size)
			rc = -EIO;

		if (rc)
			goto err;

		if (!build_block_map(part, i))
			blocks_found++;
	}

	if (blocks_found == 0) {
		printk(KERN_NOTICE PREFIX "no RFD magic found in '%s'\n",
			part->mbd.mtd->name);
		rc = -ENOENT;
		goto err;
	}

	/* no spare unit means reclaim could lose data: force read-only */
	if (part->reserved_block == -1) {
		printk(KERN_WARNING PREFIX "'%s': no empty erase unit found\n",
			part->mbd.mtd->name);

		part->errors = 1;
	}

	return 0;

err:
	vfree(part->sector_map);
	kfree(part->header_cache);
	kfree(part->blocks);

	return rc;
}
  186. static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
  187. {
  188. struct partition *part = (struct partition*)dev;
  189. u_long addr;
  190. size_t retlen;
  191. int rc;
  192. if (sector >= part->sector_count)
  193. return -EIO;
  194. addr = part->sector_map[sector];
  195. if (addr != -1) {
  196. rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
  197. (u_char *)buf);
  198. if (!rc && retlen != SECTOR_SIZE)
  199. rc = -EIO;
  200. if (rc) {
  201. printk(KERN_WARNING PREFIX "error reading '%s' at "
  202. "0x%lx\n", part->mbd.mtd->name, addr);
  203. return rc;
  204. }
  205. } else
  206. memset(buf, 0, SECTOR_SIZE);
  207. return 0;
  208. }
  209. static int erase_block(struct partition *part, int block)
  210. {
  211. struct erase_info *erase;
  212. int rc;
  213. erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
  214. if (!erase)
  215. return -ENOMEM;
  216. erase->addr = part->blocks[block].offset;
  217. erase->len = part->block_size;
  218. part->blocks[block].state = BLOCK_ERASING;
  219. part->blocks[block].free_sectors = 0;
  220. rc = mtd_erase(part->mbd.mtd, erase);
  221. if (rc) {
  222. printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
  223. "failed\n", (unsigned long long)erase->addr,
  224. (unsigned long long)erase->len, part->mbd.mtd->name);
  225. part->blocks[block].state = BLOCK_FAILED;
  226. part->blocks[block].free_sectors = 0;
  227. part->blocks[block].used_sectors = 0;
  228. } else {
  229. u16 magic = cpu_to_le16(RFD_MAGIC);
  230. size_t retlen;
  231. part->blocks[block].state = BLOCK_ERASED;
  232. part->blocks[block].free_sectors = part->data_sectors_per_block;
  233. part->blocks[block].used_sectors = 0;
  234. part->blocks[block].erases++;
  235. rc = mtd_write(part->mbd.mtd, part->blocks[block].offset,
  236. sizeof(magic), &retlen, (u_char *)&magic);
  237. if (!rc && retlen != sizeof(magic))
  238. rc = -EIO;
  239. if (rc) {
  240. pr_err(PREFIX "'%s': unable to write RFD header at 0x%lx\n",
  241. part->mbd.mtd->name, part->blocks[block].offset);
  242. part->blocks[block].state = BLOCK_FAILED;
  243. } else {
  244. part->blocks[block].state = BLOCK_OK;
  245. }
  246. }
  247. kfree(erase);
  248. return rc;
  249. }
/*
 * Relocate every live data sector out of erase unit 'block_no' so it can
 * be erased and reused.
 *
 * The unit's header is re-read from flash (not taken from header_cache,
 * which may describe a different unit) and each mapped sector is copied
 * via rfd_ftl_writesect() into whatever unit is currently writable.
 * If *old_sector (the flash address the caller is about to supersede)
 * lives in this unit, it is dropped instead of copied and *old_sector is
 * set to -1 so the caller skips mark_sector_deleted().
 * part->is_reclaiming prevents find_writable_block() from recursing back
 * into reclaim while we are copying.  Returns 0 or a negative errno.
 */
static int move_block_contents(struct partition *part, int block_no, u_long *old_sector)
{
	void *sector_data;
	u16 *map;
	size_t retlen;
	int i, rc = -ENOMEM;

	part->is_reclaiming = 1;

	sector_data = kmalloc(SECTOR_SIZE, GFP_KERNEL);
	if (!sector_data)
		goto err3;

	map = kmalloc(part->header_size, GFP_KERNEL);
	if (!map)
		goto err2;

	rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
		part->header_size, &retlen, (u_char *)map);
	if (!rc && retlen != part->header_size)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error reading '%s' at "
			"0x%lx\n", part->mbd.mtd->name,
			part->blocks[block_no].offset);
		goto err;
	}

	for (i=0; i<part->data_sectors_per_block; i++) {
		u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]);
		u_long addr;

		if (entry == SECTOR_FREE || entry == SECTOR_DELETED)
			continue;

		if (entry == SECTOR_ZERO)
			entry = 0;

		/* already warned about and ignored in build_block_map() */
		if (entry >= part->sector_count)
			continue;

		addr = part->blocks[block_no].offset +
			(i + part->header_sectors_per_block) * SECTOR_SIZE;

		if (*old_sector == addr) {
			/* caller is rewriting this sector anyway: drop it */
			*old_sector = -1;
			/*
			 * NOTE(review): post-decrement, so this fires when
			 * used_sectors was already 0 — i.e. nothing live
			 * remains and the unit can be erased outright.
			 */
			if (!part->blocks[block_no].used_sectors--) {
				rc = erase_block(part, block_no);
				break;
			}
			continue;
		}
		rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
			sector_data);
		if (!rc && retlen != SECTOR_SIZE)
			rc = -EIO;

		if (rc) {
			printk(KERN_ERR PREFIX "'%s': Unable to "
				"read sector for relocation\n",
				part->mbd.mtd->name);
			goto err;
		}
		rc = rfd_ftl_writesect((struct mtd_blktrans_dev*)part,
			entry, sector_data);
		if (rc)
			goto err;
	}

err:
	kfree(map);
err2:
	kfree(sector_data);
err3:
	part->is_reclaiming = 0;

	return rc;
}
  316. static int reclaim_block(struct partition *part, u_long *old_sector)
  317. {
  318. int block, best_block, score, old_sector_block;
  319. int rc;
  320. /* we have a race if sync doesn't exist */
  321. mtd_sync(part->mbd.mtd);
  322. score = 0x7fffffff; /* MAX_INT */
  323. best_block = -1;
  324. if (*old_sector != -1)
  325. old_sector_block = *old_sector / part->block_size;
  326. else
  327. old_sector_block = -1;
  328. for (block=0; block<part->total_blocks; block++) {
  329. int this_score;
  330. if (block == part->reserved_block)
  331. continue;
  332. /*
  333. * Postpone reclaiming if there is a free sector as
  334. * more removed sectors is more efficient (have to move
  335. * less).
  336. */
  337. if (part->blocks[block].free_sectors)
  338. return 0;
  339. this_score = part->blocks[block].used_sectors;
  340. if (block == old_sector_block)
  341. this_score--;
  342. else {
  343. /* no point in moving a full block */
  344. if (part->blocks[block].used_sectors ==
  345. part->data_sectors_per_block)
  346. continue;
  347. }
  348. this_score += part->blocks[block].erases;
  349. if (this_score < score) {
  350. best_block = block;
  351. score = this_score;
  352. }
  353. }
  354. if (best_block == -1)
  355. return -ENOSPC;
  356. part->current_block = -1;
  357. part->reserved_block = best_block;
  358. pr_debug("reclaim_block: reclaiming block #%d with %d used "
  359. "%d free sectors\n", best_block,
  360. part->blocks[best_block].used_sectors,
  361. part->blocks[best_block].free_sectors);
  362. if (part->blocks[best_block].used_sectors)
  363. rc = move_block_contents(part, best_block, old_sector);
  364. else
  365. rc = erase_block(part, best_block);
  366. return rc;
  367. }
  368. /*
  369. * IMPROVE: It would be best to choose the block with the most deleted sectors,
  370. * because if we fill that one up first it'll have the most chance of having
  371. * the least live sectors at reclaim.
  372. */
  373. static int find_free_block(struct partition *part)
  374. {
  375. int block, stop;
  376. block = part->current_block == -1 ?
  377. jiffies % part->total_blocks : part->current_block;
  378. stop = block;
  379. do {
  380. if (part->blocks[block].free_sectors &&
  381. block != part->reserved_block)
  382. return block;
  383. if (part->blocks[block].state == BLOCK_UNUSED)
  384. erase_block(part, block);
  385. if (++block >= part->total_blocks)
  386. block = 0;
  387. } while (block != stop);
  388. return -1;
  389. }
  390. static int find_writable_block(struct partition *part, u_long *old_sector)
  391. {
  392. int rc, block;
  393. size_t retlen;
  394. block = find_free_block(part);
  395. if (block == -1) {
  396. if (!part->is_reclaiming) {
  397. rc = reclaim_block(part, old_sector);
  398. if (rc)
  399. goto err;
  400. block = find_free_block(part);
  401. }
  402. if (block == -1) {
  403. rc = -ENOSPC;
  404. goto err;
  405. }
  406. }
  407. rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
  408. part->header_size, &retlen,
  409. (u_char *)part->header_cache);
  410. if (!rc && retlen != part->header_size)
  411. rc = -EIO;
  412. if (rc) {
  413. printk(KERN_ERR PREFIX "'%s': unable to read header at "
  414. "0x%lx\n", part->mbd.mtd->name,
  415. part->blocks[block].offset);
  416. goto err;
  417. }
  418. part->current_block = block;
  419. err:
  420. return rc;
  421. }
  422. static int mark_sector_deleted(struct partition *part, u_long old_addr)
  423. {
  424. int block, offset, rc;
  425. u_long addr;
  426. size_t retlen;
  427. u16 del = cpu_to_le16(SECTOR_DELETED);
  428. block = old_addr / part->block_size;
  429. offset = (old_addr % part->block_size) / SECTOR_SIZE -
  430. part->header_sectors_per_block;
  431. addr = part->blocks[block].offset +
  432. (HEADER_MAP_OFFSET + offset) * sizeof(u16);
  433. rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
  434. (u_char *)&del);
  435. if (!rc && retlen != sizeof(del))
  436. rc = -EIO;
  437. if (rc) {
  438. printk(KERN_ERR PREFIX "error writing '%s' at "
  439. "0x%lx\n", part->mbd.mtd->name, addr);
  440. goto err;
  441. }
  442. if (block == part->current_block)
  443. part->header_cache[offset + HEADER_MAP_OFFSET] = del;
  444. part->blocks[block].used_sectors--;
  445. if (!part->blocks[block].used_sectors &&
  446. !part->blocks[block].free_sectors)
  447. rc = erase_block(part, block);
  448. err:
  449. return rc;
  450. }
  451. static int find_free_sector(const struct partition *part, const struct block *block)
  452. {
  453. int i, stop;
  454. i = stop = part->data_sectors_per_block - block->free_sectors;
  455. do {
  456. if (le16_to_cpu(part->header_cache[HEADER_MAP_OFFSET + i])
  457. == SECTOR_FREE)
  458. return i;
  459. if (++i == part->data_sectors_per_block)
  460. i = 0;
  461. }
  462. while(i != stop);
  463. return -1;
  464. }
/*
 * Write one 512-byte sector into the current writable erase unit.
 *
 * Picks (or creates, via find_writable_block()) a unit with free space,
 * finds a free map slot, writes the data first and the map entry second,
 * then updates sector_map and the unit's counters.  *old_addr is passed
 * through to reclaim so the sector's previous copy can be discounted
 * (and possibly invalidated).  Returns 0 or a negative errno.
 */
static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf, ulong *old_addr)
{
	struct partition *part = (struct partition*)dev;
	struct block *block;
	u_long addr;
	int i;
	int rc;
	size_t retlen;
	u16 entry;

	/* need a unit with at least one free sector */
	if (part->current_block == -1 ||
	    !part->blocks[part->current_block].free_sectors) {
		rc = find_writable_block(part, old_addr);
		if (rc)
			goto err;
	}

	block = &part->blocks[part->current_block];

	i = find_free_sector(part, block);
	if (i < 0) {
		rc = -ENOSPC;
		goto err;
	}

	/* data first; the map entry only goes in after the data is safe */
	addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
		block->offset;
	rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
		       (u_char *)buf);
	if (!rc && retlen != SECTOR_SIZE)
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}

	part->sector_map[sector] = addr;

	/* sector 0 is encoded as SECTOR_ZERO: 0x0000 means "deleted" */
	entry = cpu_to_le16(sector == 0 ? SECTOR_ZERO : sector);

	part->header_cache[i + HEADER_MAP_OFFSET] = entry;

	addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
	rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
		       (u_char *)&entry);
	if (!rc && retlen != sizeof(entry))
		rc = -EIO;

	if (rc) {
		printk(KERN_ERR PREFIX "error writing '%s' at 0x%lx\n",
				part->mbd.mtd->name, addr);
		goto err;
	}
	block->used_sectors++;
	block->free_sectors--;

err:
	return rc;
}
/*
 * Block-layer writesect entry point.
 *
 * All-zero sectors are not stored: the map entry is set to -1 and
 * readsect() synthesises zeroes.  Otherwise the new copy is written via
 * do_writesect() before the old copy is marked deleted, so a crash in
 * between leaves a duplicate rather than a loss.  Fails with -EACCES
 * when no reserve unit exists (device was scanned with errors).
 */
static int rfd_ftl_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf)
{
	struct partition *part = (struct partition*)dev;
	u_long old_addr;
	int i;
	int rc = 0;

	pr_debug("rfd_ftl_writesect(sector=0x%lx)\n", sector);

	if (part->reserved_block == -1) {
		rc = -EACCES;
		goto err;
	}

	if (sector >= part->sector_count) {
		rc = -EIO;
		goto err;
	}

	old_addr = part->sector_map[sector];

	/* scan for a non-zero byte; only then does anything hit flash */
	for (i=0; i<SECTOR_SIZE; i++) {
		if (!buf[i])
			continue;

		/* may clear old_addr if reclaim relocates/drops the sector */
		rc = do_writesect(dev, sector, buf, &old_addr);
		if (rc)
			goto err;
		break;
	}

	/* loop ran to completion: sector is all zeroes, store nothing */
	if (i == SECTOR_SIZE)
		part->sector_map[sector] = -1;

	if (old_addr != -1)
		rc = mark_sector_deleted(part, old_addr);

err:
	return rc;
}
  546. static int rfd_ftl_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo)
  547. {
  548. struct partition *part = (struct partition*)dev;
  549. geo->heads = 1;
  550. geo->sectors = SECTORS_PER_TRACK;
  551. geo->cylinders = part->cylinders;
  552. return 0;
  553. }
  554. static void rfd_ftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
  555. {
  556. struct partition *part;
  557. if (mtd->type != MTD_NORFLASH || mtd->size > UINT_MAX)
  558. return;
  559. part = kzalloc(sizeof(struct partition), GFP_KERNEL);
  560. if (!part)
  561. return;
  562. part->mbd.mtd = mtd;
  563. if (block_size)
  564. part->block_size = block_size;
  565. else {
  566. if (!mtd->erasesize) {
  567. printk(KERN_WARNING PREFIX "please provide block_size");
  568. goto out;
  569. } else
  570. part->block_size = mtd->erasesize;
  571. }
  572. if (scan_header(part) == 0) {
  573. part->mbd.size = part->sector_count;
  574. part->mbd.tr = tr;
  575. part->mbd.devnum = -1;
  576. if (!(mtd->flags & MTD_WRITEABLE))
  577. part->mbd.readonly = 1;
  578. else if (part->errors) {
  579. printk(KERN_WARNING PREFIX "'%s': errors found, "
  580. "setting read-only\n", mtd->name);
  581. part->mbd.readonly = 1;
  582. }
  583. printk(KERN_INFO PREFIX "name: '%s' type: %d flags %x\n",
  584. mtd->name, mtd->type, mtd->flags);
  585. if (!add_mtd_blktrans_dev((void*)part))
  586. return;
  587. }
  588. out:
  589. kfree(part);
  590. }
  591. static void rfd_ftl_remove_dev(struct mtd_blktrans_dev *dev)
  592. {
  593. struct partition *part = (struct partition*)dev;
  594. int i;
  595. for (i=0; i<part->total_blocks; i++) {
  596. pr_debug("rfd_ftl_remove_dev:'%s': erase unit #%02d: %d erases\n",
  597. part->mbd.mtd->name, i, part->blocks[i].erases);
  598. }
  599. del_mtd_blktrans_dev(dev);
  600. vfree(part->sector_map);
  601. kfree(part->header_cache);
  602. kfree(part->blocks);
  603. }
/* Registration record handed to the MTD block translation core. */
static struct mtd_blktrans_ops rfd_ftl_tr = {
	.name		= "rfd",
	.major		= RFD_FTL_MAJOR,
	.part_bits	= PART_BITS,
	.blksize 	= SECTOR_SIZE,

	.readsect	= rfd_ftl_readsect,
	.writesect	= rfd_ftl_writesect,
	.getgeo		= rfd_ftl_getgeo,
	.add_mtd	= rfd_ftl_add_mtd,
	.remove_dev	= rfd_ftl_remove_dev,
	.owner		= THIS_MODULE,
};
/* Module init: register the translation layer; probing happens per-MTD. */
static int __init init_rfd_ftl(void)
{
	return register_mtd_blktrans(&rfd_ftl_tr);
}
/* Module exit: unregister the translation layer and tear down devices. */
static void __exit cleanup_rfd_ftl(void)
{
	deregister_mtd_blktrans(&rfd_ftl_tr);
}
module_init(init_rfd_ftl);
module_exit(cleanup_rfd_ftl);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sean Young <sean@mess.org>");
MODULE_DESCRIPTION("Support code for RFD Flash Translation Layer, "
		"used by General Software's Embedded BIOS");