/* lpddr_cmds.c */
  1. /*
  2. * LPDDR flash memory device operations. This module provides read, write,
  3. * erase, lock/unlock support for LPDDR flash memories
  4. * (C) 2008 Korolev Alexey <akorolev@infradead.org>
  5. * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
  6. * Many thanks to Roman Borisov for initial enabling
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License
  10. * as published by the Free Software Foundation; either version 2
  11. * of the License, or (at your option) any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  21. * 02110-1301, USA.
  22. * TODO:
  23. * Implement VPP management
  24. * Implement XIP support
  25. * Implement OTP support
  26. */
  27. #include <linux/mtd/pfow.h>
  28. #include <linux/mtd/qinfo.h>
  29. #include <linux/slab.h>
/* MTD operations installed into struct mtd_info by lpddr_cmdset() below. */
static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, u_char *buf);
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
				size_t len, size_t *retlen, const u_char *buf);
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
			unsigned long count, loff_t to, size_t *retlen);
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys);
static void lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
/* Chip acquisition/arbitration helpers (see get_chip()/put_chip()). */
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
static void put_chip(struct map_info *map, struct flchip *chip);
  45. struct mtd_info *lpddr_cmdset(struct map_info *map)
  46. {
  47. struct lpddr_private *lpddr = map->fldrv_priv;
  48. struct flchip_shared *shared;
  49. struct flchip *chip;
  50. struct mtd_info *mtd;
  51. int numchips;
  52. int i, j;
  53. mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
  54. if (!mtd) {
  55. printk(KERN_ERR "Failed to allocate memory for MTD device\n");
  56. return NULL;
  57. }
  58. mtd->priv = map;
  59. mtd->type = MTD_NORFLASH;
  60. /* Fill in the default mtd operations */
  61. mtd->read = lpddr_read;
  62. mtd->type = MTD_NORFLASH;
  63. mtd->flags = MTD_CAP_NORFLASH;
  64. mtd->flags &= ~MTD_BIT_WRITEABLE;
  65. mtd->erase = lpddr_erase;
  66. mtd->write = lpddr_write_buffers;
  67. mtd->writev = lpddr_writev;
  68. mtd->read_oob = NULL;
  69. mtd->write_oob = NULL;
  70. mtd->sync = NULL;
  71. mtd->lock = lpddr_lock;
  72. mtd->unlock = lpddr_unlock;
  73. mtd->suspend = NULL;
  74. mtd->resume = NULL;
  75. if (map_is_linear(map)) {
  76. mtd->point = lpddr_point;
  77. mtd->unpoint = lpddr_unpoint;
  78. }
  79. mtd->block_isbad = NULL;
  80. mtd->block_markbad = NULL;
  81. mtd->size = 1 << lpddr->qinfo->DevSizeShift;
  82. mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
  83. mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
  84. shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
  85. GFP_KERNEL);
  86. if (!shared) {
  87. kfree(lpddr);
  88. kfree(mtd);
  89. return NULL;
  90. }
  91. chip = &lpddr->chips[0];
  92. numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
  93. for (i = 0; i < numchips; i++) {
  94. shared[i].writing = shared[i].erasing = NULL;
  95. mutex_init(&shared[i].lock);
  96. for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
  97. *chip = lpddr->chips[i];
  98. chip->start += j << lpddr->chipshift;
  99. chip->oldstate = chip->state = FL_READY;
  100. chip->priv = &shared[i];
  101. /* those should be reset too since
  102. they create memory references. */
  103. init_waitqueue_head(&chip->wq);
  104. mutex_init(&chip->mutex);
  105. chip++;
  106. }
  107. }
  108. return mtd;
  109. }
  110. EXPORT_SYMBOL(lpddr_cmdset);
/*
 * wait_for_ready - poll the PFOW Device Status Register until the chip
 * reports ready or the timeout budget is exhausted.
 * @map: map the chip lives on
 * @chip: chip being waited on; caller holds chip->mutex
 * @chip_op_time: expected operation duration in microseconds
 *		  (0 selects a 500000-tick default budget)
 *
 * Drops chip->mutex while sleeping/spinning so other partitions can make
 * progress, and re-takes it before each status re-check. If the state is
 * found changed after reacquiring the lock (someone suspended us), waits
 * on chip->wq until the original state is restored and the timeout budget
 * is reset.
 *
 * Returns 0 on success, -ETIME on timeout, -EIO if DSR reports an error
 * (error bits are cleared before returning). Always leaves chip->state
 * set to FL_READY.
 */
static int wait_for_ready(struct map_info *map, struct flchip *chip,
		unsigned int chip_op_time)
{
	unsigned int timeo, reset_timeo, sleep_time;
	unsigned int dsr;
	flstate_t chip_state = chip->state;
	int ret = 0;

	/* set our timeout to 8 times the expected delay */
	timeo = chip_op_time * 8;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
		if (dsr & DSR_READY_STATUS)
			break;
		if (!timeo) {
			printk(KERN_ERR "%s: Flash timeout error state %d \n",
							map->name, chip_state);
			ret = -ETIME;
			break;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			/* Remaining delay is shorter than a jiffy: spin. */
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);

		while (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
		}
		if (chip->erase_suspended || chip->write_suspended) {
			/* Suspend has occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = chip->write_suspended = 0;
		}
	}
	/* check status for errors */
	if (dsr & DSR_ERR) {
		/* Clear DSR*/
		map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
		printk(KERN_WARNING"%s: Bad status on wait: 0x%x \n",
				map->name, dsr);
		print_drs_error(dsr);
		ret = -EIO;
	}
	chip->state = FL_READY;
	return ret;
}
/*
 * get_chip - acquire a chip (partition) for operation @mode
 * @map: map the chip lives on
 * @chip: partition to acquire; caller holds chip->mutex
 * @mode: intended operation (FL_READY, FL_POINT, FL_WRITING, FL_ERASING,
 *	  FL_LOCKING, FL_SYNCING, ...)
 *
 * For write/erase modes, first arbitrates ownership of the per-chip
 * engine through chip->priv (flchip_shared), possibly suspending or
 * sleeping on a contending partition; then calls chip_ready() to bring
 * this partition into a usable state. Retries internally on -EAGAIN.
 *
 * Returns 0 when the caller may proceed (and must later call put_chip()),
 * or a negative error from chip_ready().
 */
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
		&& chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contension on the write/erase
		 * operations which are global to the real chip and not per
		 * partition. So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 * shared->erasing.
		 *
		 * - contension arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;

		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it. If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep. In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);
			/* We should not own chip if it is already in FL_SYNCING
			 * state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we have suspended erase on this chip.
		   Must sleep in such a case. */
		if (mode == FL_ERASING && shared->erasing
			&& shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
/*
 * chip_ready - bring a chip into a state suitable for operation @mode
 * @map: map the chip lives on
 * @chip: target chip; caller holds chip->mutex
 * @mode: desired operation (FL_READY, FL_POINT, FL_WRITING, ...)
 *
 * If an erase is in progress and the hardware supports erase-suspend
 * (qinfo->SuspEraseSupp) and the caller only wants to read/point, the
 * erase is suspended; otherwise the caller is put to sleep on chip->wq.
 *
 * Returns 0 when the caller may proceed, -EIO when the suspend command
 * failed (the erase is resumed via put_chip() first), or -EAGAIN after
 * sleeping — the caller must re-evaluate the chip state and retry.
 */
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (FL_SYNCING == mode && FL_READY != chip->oldstate)
		goto sleep;

	switch (chip->state) {
	case FL_READY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!lpddr->qinfo->SuspEraseSupp ||
			!(mode == FL_READY || mode == FL_POINT))
			goto sleep;

		map_write(map, CMD(LPDDR_SUSPEND),
			map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		ret = wait_for_ready(map, chip, 0);
		if (ret) {
			/* Oops. something got wrong. */
			/* Resume and pretend we weren't here. */
			put_chip(map, chip);
			printk(KERN_ERR "%s: suspend operation failed."
					"State may be wrong \n", map->name);
			return -EIO;
		}
		chip->erase_suspended = 1;
		chip->state = FL_READY;
		return 0;
		/* Erase suspend */
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* fallthrough */
	default:
sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
/*
 * put_chip - release a chip acquired with get_chip()
 * @map: map the chip lives on
 * @chip: chip to release; caller holds chip->mutex
 *
 * Hands write/erase engine ownership back through chip->priv (the
 * flchip_shared), possibly recursing into the partition that still owns
 * a suspended erase, resumes a suspended erase we own (oldstate ==
 * FL_ERASING), and wakes anyone sleeping on chip->wq.
 */
static void put_chip(struct map_info *map, struct flchip *chip)
{
	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back the ownership */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				/* Release in the loaner's context; it owns
				 * the suspended erase to be resumed. */
				put_chip(map, loaner);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch (chip->oldstate) {
	case FL_ERASING:
		/* Resume the erase we suspended in chip_ready(). */
		map_write(map, CMD(LPDDR_RESUME),
				map->pfow_base + PFOW_COMMAND_CODE);
		map_write(map, CMD(LPDDR_START_EXECUTION),
				map->pfow_base + PFOW_COMMAND_EXECUTE);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;
	case FL_READY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
				map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
/*
 * do_write_buffer - program up to one write buffer's worth of data
 * @map: map the chip lives on
 * @chip: chip/partition to write to
 * @adr: destination address (must stay within one write buffer)
 * @pvec: in/out: current kvec being consumed; advanced past used vectors
 * @pvec_seek: in/out: byte offset within *pvec; updated on return
 * @len: number of payload bytes to program
 *
 * Packs the (possibly scattered, possibly unaligned) payload into
 * bankwidth-sized words — padding a leading gap with 0xFF via
 * map_word_ff() — writes the words into the device's program buffer
 * (located via PFOW_PROGRAM_BUFFER_OFFSET), then issues
 * LPDDR_BUFF_PROGRAM and waits for completion.
 *
 * Returns 0 on success or a negative error from get_chip/wait_for_ready.
 */
int do_write_buffer(struct map_info *map, struct flchip *chip,
			unsigned long adr, const struct kvec **pvec,
			unsigned long *pvec_seek, int len)
{
	struct lpddr_private *lpddr = map->fldrv_priv;
	map_word datum;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long prog_buf_ofs;

	wbufsize = 1 << lpddr->qinfo->BufSizeShift;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
	/* NOTE(review): 'words' is computed but never used below. */
	if (!word_gap) {
		words--;
	} else {
		/* Unaligned start: back up to the word boundary and
		 * pre-fill the leading gap with all-ones. */
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
	/* Write data */
	/* Get the program buffer offset from PFOW register data first*/
	prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
				map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		/* Bytes of this word still to fill from the iovec. */
		int n = map_bankwidth(map) - word_gap;

		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && (len < map_bankwidth(map)))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
				vec->iov_base + vec_seek, word_gap, n);

		len -= n;
		word_gap += n;
		/* Flush a completed (or final partial) word to the buffer. */
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, prog_buf_ofs);
			prog_buf_ofs += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
	chip->state = FL_WRITING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
	if (ret) {
		printk(KERN_WARNING"%s Buffer program error: %d at %lx; \n",
			map->name, ret, adr);
		goto out;
	}

out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
/*
 * do_erase_oneblock - erase the single block containing @adr
 * @mtd: MTD device
 * @adr: device-absolute address inside the block to erase
 *
 * Acquires the owning chip in FL_ERASING mode, issues the PFOW block
 * erase command and waits for completion. The duration hint passed to
 * wait_for_ready() is (1 << BlockEraseTime) * 1000 — presumably a
 * milliseconds-to-microseconds conversion; confirm against qinfo units.
 *
 * Returns 0 on success or a negative error from get_chip/wait_for_ready.
 */
int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
	chip->state = FL_ERASING;
	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
	if (ret) {
		printk(KERN_WARNING"%s Erase block error %d at : %llx\n",
			map->name, ret, adr);
		goto out;
	}
out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
  464. static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
  465. size_t *retlen, u_char *buf)
  466. {
  467. struct map_info *map = mtd->priv;
  468. struct lpddr_private *lpddr = map->fldrv_priv;
  469. int chipnum = adr >> lpddr->chipshift;
  470. struct flchip *chip = &lpddr->chips[chipnum];
  471. int ret = 0;
  472. mutex_lock(&chip->mutex);
  473. ret = get_chip(map, chip, FL_READY);
  474. if (ret) {
  475. mutex_unlock(&chip->mutex);
  476. return ret;
  477. }
  478. map_copy_from(map, buf, adr, len);
  479. *retlen = len;
  480. put_chip(map, chip);
  481. mutex_unlock(&chip->mutex);
  482. return ret;
  483. }
/*
 * lpddr_point - MTD point operation: hand out a direct mapping
 * @mtd: MTD device
 * @adr: start address
 * @len: requested length
 * @retlen: out: length actually made available (may be < len)
 * @mtdbuf: out: virtual address of the mapped region
 * @phys: unused here (physical address out-parameter)
 *
 * Walks consecutive chips as long as they are virtually contiguous,
 * putting each into FL_POINT state and bumping its ref_point_counter.
 * Errors from get_chip() only terminate the walk — the function still
 * returns 0 and *retlen tells the caller how much was actually mapped.
 *
 * Returns 0, or -EINVAL if the map has no virtual address or the range
 * exceeds the device size.
 */
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
			size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	unsigned long ofs, last_end = 0;
	struct flchip *chip = &lpddr->chips[chipnum];
	int ret = 0;

	if (!map->virt || (adr + len > mtd->size))
		return -EINVAL;

	/* ofs: offset within the first chip that the first read should start */
	ofs = adr - (chipnum << lpddr->chipshift);

	*mtdbuf = (void *)map->virt + chip->start + ofs;
	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= lpddr->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = chip->start;
		else if (chip->start != last_end)
			break;

		/* Clamp to the end of the current chip. */
		if ((len + ofs - 1) >> lpddr->chipshift)
			thislen = (1<<lpddr->chipshift) - ofs;
		else
			thislen = len;
		/* get the chip */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, FL_POINT);
		mutex_unlock(&chip->mutex);
		if (ret)
			break;

		/* NOTE(review): state is updated after dropping chip->mutex;
		 * matches the original ordering — confirm it is safe. */
		chip->state = FL_POINT;
		chip->ref_point_counter++;
		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << lpddr->chipshift;
		chipnum++;
		chip = &lpddr->chips[chipnum];
	}
	return 0;
}
  529. static void lpddr_unpoint (struct mtd_info *mtd, loff_t adr, size_t len)
  530. {
  531. struct map_info *map = mtd->priv;
  532. struct lpddr_private *lpddr = map->fldrv_priv;
  533. int chipnum = adr >> lpddr->chipshift;
  534. unsigned long ofs;
  535. /* ofs: offset within the first chip that the first read should start */
  536. ofs = adr - (chipnum << lpddr->chipshift);
  537. while (len) {
  538. unsigned long thislen;
  539. struct flchip *chip;
  540. chip = &lpddr->chips[chipnum];
  541. if (chipnum >= lpddr->numchips)
  542. break;
  543. if ((len + ofs - 1) >> lpddr->chipshift)
  544. thislen = (1<<lpddr->chipshift) - ofs;
  545. else
  546. thislen = len;
  547. mutex_lock(&chip->mutex);
  548. if (chip->state == FL_POINT) {
  549. chip->ref_point_counter--;
  550. if (chip->ref_point_counter == 0)
  551. chip->state = FL_READY;
  552. } else
  553. printk(KERN_WARNING "%s: Warning: unpoint called on non"
  554. "pointed region\n", map->name);
  555. put_chip(map, chip);
  556. mutex_unlock(&chip->mutex);
  557. len -= thislen;
  558. ofs = 0;
  559. chipnum++;
  560. }
  561. }
  562. static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
  563. size_t *retlen, const u_char *buf)
  564. {
  565. struct kvec vec;
  566. vec.iov_base = (void *) buf;
  567. vec.iov_len = len;
  568. return lpddr_writev(mtd, &vec, 1, to, retlen);
  569. }
  570. static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
  571. unsigned long count, loff_t to, size_t *retlen)
  572. {
  573. struct map_info *map = mtd->priv;
  574. struct lpddr_private *lpddr = map->fldrv_priv;
  575. int ret = 0;
  576. int chipnum;
  577. unsigned long ofs, vec_seek, i;
  578. int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
  579. size_t len = 0;
  580. for (i = 0; i < count; i++)
  581. len += vecs[i].iov_len;
  582. *retlen = 0;
  583. if (!len)
  584. return 0;
  585. chipnum = to >> lpddr->chipshift;
  586. ofs = to;
  587. vec_seek = 0;
  588. do {
  589. /* We must not cross write block boundaries */
  590. int size = wbufsize - (ofs & (wbufsize-1));
  591. if (size > len)
  592. size = len;
  593. ret = do_write_buffer(map, &lpddr->chips[chipnum],
  594. ofs, &vecs, &vec_seek, size);
  595. if (ret)
  596. return ret;
  597. ofs += size;
  598. (*retlen) += size;
  599. len -= size;
  600. /* Be nice and reschedule with the chip in a usable
  601. * state for other processes */
  602. cond_resched();
  603. } while (len);
  604. return 0;
  605. }
  606. static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
  607. {
  608. unsigned long ofs, len;
  609. int ret;
  610. struct map_info *map = mtd->priv;
  611. struct lpddr_private *lpddr = map->fldrv_priv;
  612. int size = 1 << lpddr->qinfo->UniformBlockSizeShift;
  613. ofs = instr->addr;
  614. len = instr->len;
  615. if (ofs > mtd->size || (len + ofs) > mtd->size)
  616. return -EINVAL;
  617. while (len > 0) {
  618. ret = do_erase_oneblock(mtd, ofs);
  619. if (ret)
  620. return ret;
  621. ofs += size;
  622. len -= size;
  623. }
  624. instr->state = MTD_ERASE_DONE;
  625. mtd_erase_callback(instr);
  626. return 0;
  627. }
  628. #define DO_XXLOCK_LOCK 1
  629. #define DO_XXLOCK_UNLOCK 2
  630. int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
  631. {
  632. int ret = 0;
  633. struct map_info *map = mtd->priv;
  634. struct lpddr_private *lpddr = map->fldrv_priv;
  635. int chipnum = adr >> lpddr->chipshift;
  636. struct flchip *chip = &lpddr->chips[chipnum];
  637. mutex_lock(&chip->mutex);
  638. ret = get_chip(map, chip, FL_LOCKING);
  639. if (ret) {
  640. mutex_unlock(&chip->mutex);
  641. return ret;
  642. }
  643. if (thunk == DO_XXLOCK_LOCK) {
  644. send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
  645. chip->state = FL_LOCKING;
  646. } else if (thunk == DO_XXLOCK_UNLOCK) {
  647. send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
  648. chip->state = FL_UNLOCKING;
  649. } else
  650. BUG();
  651. ret = wait_for_ready(map, chip, 1);
  652. if (ret) {
  653. printk(KERN_ERR "%s: block unlock error status %d \n",
  654. map->name, ret);
  655. goto out;
  656. }
  657. out: put_chip(map, chip);
  658. mutex_unlock(&chip->mutex);
  659. return ret;
  660. }
/* MTD lock op: protect [ofs, ofs+len) against program/erase.
 * NOTE(review): len is narrowed from uint64_t to do_xxlock's uint32_t. */
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}
/* MTD unlock op: allow program/erase of [ofs, ofs+len).
 * NOTE(review): len is narrowed from uint64_t to do_xxlock's uint32_t. */
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}
/*
 * word_program - program a single word at @adr
 * @map: map the chip lives on
 * @adr: destination address
 * @curval: value to program
 *
 * Acquires the owning chip in FL_WRITING mode, issues LPDDR_WORD_PROGRAM
 * with the value as command data, and waits for completion.
 *
 * NOTE(review): &curval (uint32_t) is cast to map_word *; this assumes a
 * bankwidth of at most 4 bytes — confirm for wider maps.
 *
 * Returns 0 on success or a negative error from get_chip/wait_for_ready.
 */
int word_program(struct map_info *map, loff_t adr, uint32_t curval)
{
	int ret;
	struct lpddr_private *lpddr = map->fldrv_priv;
	int chipnum = adr >> lpddr->chipshift;
	struct flchip *chip = &lpddr->chips[chipnum];

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	send_pfow_command(map, LPDDR_WORD_PROGRAM, adr, 0x00, (map_word *)&curval);

	ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->SingleWordProgTime));
	if (ret) {
		printk(KERN_WARNING"%s word_program error at: %llx; val: %x\n",
			map->name, adr, curval);
		goto out;
	}

out:	put_chip(map, chip);
	mutex_unlock(&chip->mutex);
	return ret;
}
  692. MODULE_LICENSE("GPL");
  693. MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
  694. MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");