
/*
 *  linux/drivers/mmc/core/mmc_ops.c
 *
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/scatterlist.h>

#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "mmc_ops.h"

#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */

static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int __mmc_send_status(struct mmc_card *card, u32 *status, unsigned int retries)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SEND_STATUS;
	if (!mmc_host_is_spi(card->host))
		cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(card->host, &cmd, retries);
	if (err)
		return err;

	/* NOTE: callers are required to understand the difference
	 * between "native" and SPI format status words!
	 */
	if (status)
		*status = cmd.resp[0];

	return 0;
}
EXPORT_SYMBOL_GPL(__mmc_send_status);

int mmc_send_status(struct mmc_card *card, u32 *status)
{
	return __mmc_send_status(card, status, MMC_CMD_RETRIES);
}
EXPORT_SYMBOL_GPL(mmc_send_status);
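
/*
 * Example (illustrative sketch, not part of this driver): a caller that
 * already holds the host can poll with CMD13 until the card leaves the
 * programming state:
 *
 *	u32 status;
 *	int err;
 *
 *	do {
 *		err = mmc_send_status(card, &status);
 *		if (err)
 *			break;
 *	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
 */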

static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SELECT_CARD;

	if (card) {
		cmd.arg = card->rca << 16;
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
	} else {
		cmd.arg = 0;
		cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
	}

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}

int mmc_select_card(struct mmc_card *card)
{
	return _mmc_select_card(card->host, card);
}

int mmc_deselect_cards(struct mmc_host *host)
{
	return _mmc_select_card(host, NULL);
}

/*
 * Write the value specified in the device tree or board code into the optional
 * 16 bit Driver Stage Register. This can be used to tune rise/fall times and
 * drive strength of the DAT and CMD outputs. The actual meaning of a given
 * value is hardware dependent.
 * The presence of the DSR register can be determined from the CSD register,
 * bit 76.
 */
int mmc_set_dsr(struct mmc_host *host)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_DSR;

	cmd.arg = (host->dsr << 16) | 0xffff;
	cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
}
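
/*
 * Illustrative note: CMD4 carries the 16-bit DSR in the upper half of its
 * 32-bit argument; the low 16 bits are stuff bits, set to 0xffff above. So a
 * device-tree "dsr" property of 0x0404 results in cmd.arg == 0x0404ffff.
 */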

int mmc_go_idle(struct mmc_host *host)
{
	int err;
	struct mmc_command cmd = {};

	/*
	 * Non-SPI hosts need to prevent chipselect going active during
	 * GO_IDLE; that would put chips into SPI mode.  Remind them of
	 * that in case of hardware that won't pull up DAT3/nCS otherwise.
	 *
	 * SPI hosts ignore ios.chip_select; it's managed according to
	 * rules that must accommodate non-MMC slaves which this layer
	 * won't even know about.
	 */
	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_HIGH);
		mmc_delay(1);
	}

	cmd.opcode = MMC_GO_IDLE_STATE;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	mmc_delay(1);

	if (!mmc_host_is_spi(host)) {
		mmc_set_chip_select(host, MMC_CS_DONTCARE);
		mmc_delay(1);
	}

	host->use_spi_crc = 0;

	return err;
}

int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
{
	struct mmc_command cmd = {};
	int i, err = 0;

	cmd.opcode = MMC_SEND_OP_COND;
	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR;

	for (i = 100; i; i--) {
		err = mmc_wait_for_cmd(host, &cmd, 0);
		if (err)
			break;

		/* if we're just probing, do a single pass */
		if (ocr == 0)
			break;

		/* otherwise wait until reset completes */
		if (mmc_host_is_spi(host)) {
			if (!(cmd.resp[0] & R1_SPI_IDLE))
				break;
		} else {
			if (cmd.resp[0] & MMC_CARD_BUSY)
				break;
		}

		err = -ETIMEDOUT;

		mmc_delay(10);
	}

	if (rocr && !mmc_host_is_spi(host))
		*rocr = cmd.resp[0];

	return err;
}
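
/*
 * Example (illustrative sketch): an attach path typically probes first with a
 * zero OCR (single pass), then negotiates with the card's reported OCR,
 * optionally requesting sector-addressing mode via bit 30:
 *
 *	u32 ocr, rocr;
 *
 *	err = mmc_send_op_cond(host, 0, &ocr);			// probe
 *	...
 *	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);	// negotiate
 */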

int mmc_set_relative_addr(struct mmc_card *card)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_SET_RELATIVE_ADDR;
	cmd.arg = card->rca << 16;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
}

static int
mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)
{
	int err;
	struct mmc_command cmd = {};

	cmd.opcode = opcode;
	cmd.arg = arg;
	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		return err;

	memcpy(cxd, cmd.resp, sizeof(u32) * 4);

	return 0;
}

/*
 * NOTE: the caller must pass a DMA-capable buffer in @buf, or an on-stack
 * buffer (with some bounce-copy overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		  u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSD and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}

static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
{
	int ret, i;
	__be32 *csd_tmp;

	csd_tmp = kzalloc(16, GFP_KERNEL);
	if (!csd_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		csd[i] = be32_to_cpu(csd_tmp[i]);

err:
	kfree(csd_tmp);
	return ret;
}

int mmc_send_csd(struct mmc_card *card, u32 *csd)
{
	if (mmc_host_is_spi(card->host))
		return mmc_spi_send_csd(card, csd);

	return mmc_send_cxd_native(card->host, card->rca << 16, csd,
				   MMC_SEND_CSD);
}

static int mmc_spi_send_cid(struct mmc_host *host, u32 *cid)
{
	int ret, i;
	__be32 *cid_tmp;

	cid_tmp = kzalloc(16, GFP_KERNEL);
	if (!cid_tmp)
		return -ENOMEM;

	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);
	if (ret)
		goto err;

	for (i = 0; i < 4; i++)
		cid[i] = be32_to_cpu(cid_tmp[i]);

err:
	kfree(cid_tmp);
	return ret;
}

int mmc_send_cid(struct mmc_host *host, u32 *cid)
{
	if (mmc_host_is_spi(host))
		return mmc_spi_send_cid(host, cid);

	return mmc_send_cxd_native(host, 0, cid, MMC_ALL_SEND_CID);
}

int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
{
	int err;
	u8 *ext_csd;

	if (!card || !new_ext_csd)
		return -EINVAL;

	if (!mmc_can_ext_csd(card))
		return -EOPNOTSUPP;

	/*
	 * As the ext_csd is so large and mostly unused, we don't store the
	 * raw block in mmc_card.
	 */
	ext_csd = kzalloc(512, GFP_KERNEL);
	if (!ext_csd)
		return -ENOMEM;

	err = mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd,
				512);
	if (err)
		kfree(ext_csd);
	else
		*new_ext_csd = ext_csd;

	return err;
}
EXPORT_SYMBOL_GPL(mmc_get_ext_csd);
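
/*
 * Example (illustrative sketch): a typical caller reads one EXT_CSD byte and
 * frees the buffer again, much like mmc_read_bkops_status() below:
 *
 *	u8 *ext_csd;
 *
 *	err = mmc_get_ext_csd(card, &ext_csd);
 *	if (!err) {
 *		rev = ext_csd[EXT_CSD_REV];
 *		kfree(ext_csd);
 *	}
 */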

int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_READ_OCR;
	cmd.arg = highcap ? (1 << 30) : 0;
	cmd.flags = MMC_RSP_SPI_R3;

	err = mmc_wait_for_cmd(host, &cmd, 0);

	*ocrp = cmd.resp[1];
	return err;
}

int mmc_spi_set_crc(struct mmc_host *host, int use_crc)
{
	struct mmc_command cmd = {};
	int err;

	cmd.opcode = MMC_SPI_CRC_ON_OFF;
	cmd.flags = MMC_RSP_SPI_R1;
	cmd.arg = use_crc;

	err = mmc_wait_for_cmd(host, &cmd, 0);
	if (!err)
		host->use_spi_crc = use_crc;
	return err;
}

static int mmc_switch_status_error(struct mmc_host *host, u32 status)
{
	if (mmc_host_is_spi(host)) {
		if (status & R1_SPI_ILLEGAL_COMMAND)
			return -EBADMSG;
	} else {
		if (status & 0xFDFFA000)
			pr_warn("%s: unexpected status %#x after switch\n",
				mmc_hostname(host), status);
		if (status & R1_SWITCH_ERROR)
			return -EBADMSG;
	}
	return 0;
}

#ifdef CONFIG_MTK_EMMC_HW_CQ
/**
 * mmc_prepare_switch - helper; prepare to modify EXT_CSD register
 * @cmd: command to be filled in
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @set: cmd set values
 * @tout_ms: timeout (ms) for operation performed by register write,
 *           timeout of zero implies maximum possible timeout
 * @use_busy_signal: use the busy signal as response type
 *
 * Helper to prepare a CMD6 (SWITCH) command that modifies an EXT_CSD
 * register of the selected card.
 */
static inline void mmc_prepare_switch(struct mmc_command *cmd, u8 index,
				      u8 value, u8 set, unsigned int tout_ms,
				      bool use_busy_signal)
{
	cmd->opcode = MMC_SWITCH;
	cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		   (index << 16) |
		   (value << 8) |
		   set;
	cmd->flags = MMC_CMD_AC;
	cmd->busy_timeout = tout_ms;
	if (use_busy_signal)
		cmd->flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
	else
		cmd->flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
}

int __mmc_switch_cmdq_mode(struct mmc_command *cmd, u8 set, u8 index, u8 value,
			   unsigned int timeout_ms, bool use_busy_signal,
			   bool ignore_timeout)
{
	mmc_prepare_switch(cmd, index, value, set, timeout_ms, use_busy_signal);
	return 0;
}
EXPORT_SYMBOL(__mmc_switch_cmdq_mode);
#endif

/* Caller must hold re-tuning */
int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal)
{
	u32 status;
	int err;

	err = mmc_send_status(card, &status);
	if (!crc_err_fatal && err == -EILSEQ)
		return 0;
	if (err)
		return err;

	return mmc_switch_status_error(card->host, status);
}

int mmc_switch_status(struct mmc_card *card)
{
	return __mmc_switch_status(card, true);
}

static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
			     bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	unsigned long timeout;
	u32 status = 0;
	bool expired = false;
	bool busy = false;

	/* We have an unspecified cmd timeout, use the fallback value. */
	if (!timeout_ms)
		timeout_ms = MMC_OPS_TIMEOUT_MS;

	/*
	 * If we aren't allowed to poll by using CMD13 and the host isn't
	 * capable of polling by using ->card_busy(), rely on waiting out
	 * the stated timeout instead.
	 */
	if (!send_status && !host->ops->card_busy) {
		mmc_delay(timeout_ms);
		return 0;
	}

	timeout = jiffies + msecs_to_jiffies(timeout_ms) + 1;
	do {
		/*
		 * Due to the possibility of being preempted while polling,
		 * check the expiration time first.
		 */
		expired = time_after(jiffies, timeout);
		if (host->ops->card_busy) {
			busy = host->ops->card_busy(host);
		} else {
			err = mmc_send_status(card, &status);
			if (retry_crc_err && err == -EILSEQ) {
				busy = true;
			} else if (err) {
				return err;
			} else {
				err = mmc_switch_status_error(host, status);
				if (err)
					return err;
				busy = R1_CURRENT_STATE(status) == R1_STATE_PRG;
			}
		}

		/* Timeout if the device still remains busy. */
		if (expired && busy) {
			pr_err("%s: Card stuck being busy! %s\n",
			       mmc_hostname(host), __func__);
			return -ETIMEDOUT;
		}
	} while (busy);

	return 0;
}

/**
 * __mmc_switch - modify EXT_CSD register
 * @card: the MMC card associated with the data transfer
 * @set: cmd set values
 * @index: EXT_CSD register index
 * @value: value to program into EXT_CSD register
 * @timeout_ms: timeout (ms) for operation performed by register write,
 *              timeout of zero implies maximum possible timeout
 * @timing: new timing to change to
 * @use_busy_signal: use the busy signal as response type
 * @send_status: send status cmd to poll for busy
 * @retry_crc_err: retry if CRC errors occur when polling with CMD13 for busy
 *
 * Modifies the EXT_CSD register for selected card.
 */
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
		 unsigned int timeout_ms, unsigned char timing,
		 bool use_busy_signal, bool send_status, bool retry_crc_err)
{
	struct mmc_host *host = card->host;
	int err;
	struct mmc_command cmd = {};
	bool use_r1b_resp = use_busy_signal;
	unsigned char old_timing = host->ios.timing;

	mmc_retune_hold(host);

	/*
	 * If the cmd timeout and the max_busy_timeout of the host are both
	 * specified, let's validate them. A failure means we need to prevent
	 * the host from doing hw busy detection, which is done by converting
	 * to a R1 response instead of a R1B.
	 */
	if (timeout_ms && host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = MMC_SWITCH;
	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
		  (index << 16) |
		  (value << 8) |
		  set;
	cmd.flags = MMC_CMD_AC;
	if (use_r1b_resp) {
		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
		/*
		 * A busy_timeout of zero means the host can decide to use
		 * whatever value it finds suitable.
		 */
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
	}

	if (index == EXT_CSD_SANITIZE_START)
		cmd.sanitize_busy = true;

	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
	if (err)
		goto out;

	/* No need to check card status in case of unblocking command */
	if (!use_busy_signal)
		goto out;

	/* If SPI or used HW busy detection above, then we don't need to poll. */
	if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
	    mmc_host_is_spi(host))
		goto out_tim;

	/* Let's try to poll to find out when the command is completed. */
	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
	if (err)
		goto out;

out_tim:
	/* Switch to new timing before checking switch status. */
	if (timing)
		mmc_set_timing(host, timing);

	if (send_status) {
		err = mmc_switch_status(card);
		if (err && timing)
			mmc_set_timing(host, old_timing);
	}
out:
	mmc_retune_release(host);

	return err;
}

int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
	       unsigned int timeout_ms)
{
	return __mmc_switch(card, set, index, value, timeout_ms, 0,
			    true, true, false);
}
EXPORT_SYMBOL_GPL(mmc_switch);
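
/*
 * Example (illustrative sketch): enabling the card's volatile cache with a
 * single EXT_CSD byte write, as an init path might do:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL,
 *			 1, card->ext_csd.generic_cmd6_time);
 */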

int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	struct mmc_ios *ios = &host->ios;
	const u8 *tuning_block_pattern;
	int size, err = 0;
	u8 *data_buf;

	if (ios->bus_width == MMC_BUS_WIDTH_8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else
		return -EINVAL;

	data_buf = kzalloc(size, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = size;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	/*
	 * According to the tuning specs, the tuning process normally takes
	 * fewer than 40 executions of CMD19, and the timeout value should be
	 * shorter than 150 ms.
	 */
	data.timeout_ns = 150 * NSEC_PER_MSEC;

	data.sg = &sg;
	data.sg_len = 1;
	sg_init_one(&sg, data_buf, size);

	mmc_wait_for_req(host, &mrq);

	if (cmd_error)
		*cmd_error = cmd.error;

	if (cmd.error) {
		err = cmd.error;
		goto out;
	}

	if (data.error) {
		err = data.error;
		goto out;
	}

	if (memcmp(data_buf, tuning_block_pattern, size))
		err = -EIO;

out:
	kfree(data_buf);
	return err;
}
EXPORT_SYMBOL_GPL(mmc_send_tuning);
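
/*
 * Example (illustrative sketch): a host driver's ->execute_tuning() callback
 * might sweep its sampling phases with this helper; my_set_phase() and
 * MY_MAX_PHASE are hypothetical driver-specific names:
 *
 *	for (phase = 0; phase <= MY_MAX_PHASE; phase++) {
 *		my_set_phase(host, phase);
 *		if (!mmc_send_tuning(host, opcode, NULL))
 *			;	// record phase as good, pick the widest window later
 *	}
 */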

int mmc_abort_tuning(struct mmc_host *host, u32 opcode)
{
	struct mmc_command cmd = {};

	/*
	 * The eMMC specification states that CMD12 can be used to stop a
	 * tuning command, but the SD specification does not, so do nothing
	 * unless it is eMMC.
	 */
	if (opcode != MMC_SEND_TUNING_BLOCK_HS200)
		return 0;

	cmd.opcode = MMC_STOP_TRANSMISSION;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;

	/*
	 * For drivers that override R1 to R1b, set an arbitrary timeout based
	 * on the tuning timeout, i.e. 150ms.
	 */
	cmd.busy_timeout = 150;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL_GPL(mmc_abort_tuning);

static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
		  u8 len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;
	u8 *data_buf;
	u8 *test_buf;
	int i, err;
	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(len, GFP_KERNEL);
	if (!data_buf)
		return -ENOMEM;

	if (len == 8)
		test_buf = testdata_8bit;
	else if (len == 4)
		test_buf = testdata_4bit;
	else {
		pr_err("%s: Invalid bus_width %d\n",
		       mmc_hostname(host), len);
		kfree(data_buf);
		return -EINVAL;
	}

	if (opcode == MMC_BUS_TEST_W)
		memcpy(data_buf, test_buf, len);

	mrq.cmd = &cmd;
	mrq.data = &data;
	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	if (opcode == MMC_BUS_TEST_R)
		data.flags = MMC_DATA_READ;
	else
		data.flags = MMC_DATA_WRITE;

	data.sg = &sg;
	data.sg_len = 1;
	mmc_set_data_timeout(&data, card);
	sg_init_one(&sg, data_buf, len);
	mmc_wait_for_req(host, &mrq);
	err = 0;
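
	/*
	 * On BUS_TEST_R the card returns the logical inverse of the pattern
	 * written with BUS_TEST_W, so each byte read XORed with the
	 * corresponding test byte must be 0xff. Note that only the first
	 * len/4 bytes are compared below.
	 */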
	if (opcode == MMC_BUS_TEST_R) {
		for (i = 0; i < len / 4; i++)
			if ((test_buf[i] ^ data_buf[i]) != 0xff) {
				err = -EIO;
				break;
			}
	}
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;
	return err;
}

int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
	int width;

	if (bus_width == MMC_BUS_WIDTH_8)
		width = 8;
	else if (bus_width == MMC_BUS_WIDTH_4)
		width = 4;
	else if (bus_width == MMC_BUS_WIDTH_1)
		return 0; /* no need for test */
	else
		return -EINVAL;

	/*
	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there
	 * is a problem.  This improves chances that the test will work.
	 */
	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
	return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
}
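
/*
 * Example (illustrative sketch): a bus-width selection path first switches
 * the card, sets the host to match, then verifies the lines:
 *
 *	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
 *			 EXT_CSD_BUS_WIDTH_8, card->ext_csd.generic_cmd6_time);
 *	if (!err) {
 *		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
 *		err = mmc_bus_test(card, MMC_BUS_WIDTH_8);
 *	}
 */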

static int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
	struct mmc_command cmd = {};
	unsigned int opcode;
	int err;

	if (!card->ext_csd.hpi) {
		pr_warn("%s: Card doesn't support HPI command\n",
			mmc_hostname(card->host));
		return -EINVAL;
	}

	opcode = card->ext_csd.hpi_cmd;
	if (opcode == MMC_STOP_TRANSMISSION)
		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
	else if (opcode == MMC_SEND_STATUS)
		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

	cmd.opcode = opcode;
	cmd.arg = card->rca << 16 | 1;

	err = mmc_wait_for_cmd(card->host, &cmd, 0);
	if (err) {
		pr_warn("%s: error %d interrupting operation. "
			"HPI command response %#x\n", mmc_hostname(card->host),
			err, cmd.resp[0]);
		return err;
	}
	if (status)
		*status = cmd.resp[0];

	return 0;
}

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issue a High Priority Interrupt, then poll the card status
 * until it leaves the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
	int err;
	u32 status;
	unsigned long prg_wait;

	if (!card->ext_csd.hpi_en) {
		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
		return 1;
	}

	mmc_claim_host(card->host);
	err = mmc_send_status(card, &status);
	if (err) {
		pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
		goto out;
	}

	switch (R1_CURRENT_STATE(status)) {
	case R1_STATE_IDLE:
	case R1_STATE_READY:
	case R1_STATE_STBY:
	case R1_STATE_TRAN:
		/*
		 * In idle and transfer states, HPI is not needed and the caller
		 * can issue the next intended command immediately
		 */
		goto out;
	case R1_STATE_PRG:
		break;
	default:
		/* In all other states, it's illegal to issue HPI */
		pr_debug("%s: HPI cannot be sent. Card state=%d\n",
			 mmc_hostname(card->host), R1_CURRENT_STATE(status));
		err = -EINVAL;
		goto out;
	}

	err = mmc_send_hpi_cmd(card, &status);
	if (err)
		goto out;

	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
	do {
		err = mmc_send_status(card, &status);

		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
			break;
		if (time_after(jiffies, prg_wait))
			err = -ETIMEDOUT;
	} while (!err);

out:
	mmc_release_host(card->host);
	return err;
}

int mmc_can_ext_csd(struct mmc_card *card)
{
	return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}

/**
 * mmc_stop_bkops - stop ongoing BKOPS
 * @card: MMC card to check BKOPS
 *
 * Send HPI command to stop ongoing background operations to
 * allow rapid servicing of foreground operations, e.g. read/
 * writes. Wait until the card comes out of the programming state
 * to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
	int err = 0;

	err = mmc_interrupt_hpi(card);

	/*
	 * If err is -EINVAL, an HPI cannot be issued; let the card
	 * complete the ongoing BKOPS instead.
	 */
	if (!err || (err == -EINVAL)) {
		mmc_card_clr_doing_bkops(card);
		mmc_retune_release(card->host);
		err = 0;
	}

	return err;
}

static int mmc_read_bkops_status(struct mmc_card *card)
{
	int err;
	u8 *ext_csd;

	mmc_claim_host(card->host);
	err = mmc_get_ext_csd(card, &ext_csd);
	mmc_release_host(card->host);
	if (err)
		return err;

	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
	kfree(ext_csd);
	return 0;
}

/**
 * mmc_start_bkops - start BKOPS for supported cards
 * @card: MMC card to start BKOPS
 * @from_exception: A flag to indicate if this function was
 *                  called due to an exception raised by the card
 *
 * Start background operations whenever requested.
 * When the urgent BKOPS bit is set in a R1 command response
 * then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
	int err;
	int timeout;
	bool use_busy_signal;

	if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
		return;

	err = mmc_read_bkops_status(card);
	if (err) {
		pr_err("%s: Failed to read bkops status: %d\n",
		       mmc_hostname(card->host), err);
		return;
	}

	if (!card->ext_csd.raw_bkops_status)
		return;

	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
	    from_exception)
		return;

	mmc_claim_host(card->host);
	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
		timeout = MMC_OPS_TIMEOUT_MS;
		use_busy_signal = true;
	} else {
		timeout = 0;
		use_busy_signal = false;
	}

	mmc_retune_hold(card->host);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_BKOPS_START, 1, timeout, 0,
			   use_busy_signal, true, false);
	if (err) {
		pr_warn("%s: Error %d starting bkops\n",
			mmc_hostname(card->host), err);
		mmc_retune_release(card->host);
		goto out;
	}

	/*
	 * For urgent BKOPS status (LEVEL_2 and higher) the operation is
	 * executed synchronously; otherwise it keeps running in the
	 * background.
	 */
	if (!use_busy_signal)
		mmc_card_set_doing_bkops(card);
	else
		mmc_retune_release(card->host);
out:
	mmc_release_host(card->host);
}
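
/*
 * Example (illustrative sketch): the block layer can kick BKOPS when a
 * command's R1 response carries the exception bit ("brq" is a hypothetical
 * request structure):
 *
 *	if (brq->cmd.resp[0] & R1_EXCEPTION_EVENT)
 *		mmc_start_bkops(card, true);
 */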

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
	int err = 0;

	if (mmc_card_mmc(card) &&
	    (card->ext_csd.cache_size > 0) &&
	    (card->ext_csd.cache_ctrl & 1)) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_FLUSH_CACHE, 1, 0);
		if (err)
			pr_err("%s: cache flush error %d\n",
			       mmc_hostname(card->host), err);
	}

	return err;
}
EXPORT_SYMBOL(mmc_flush_cache);
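
/*
 * Example (illustrative sketch): a suspend path flushes the cache before
 * powering the card down, and aborts on failure:
 *
 *	err = mmc_flush_cache(card);
 *	if (err)
 *		goto out;	// don't cut power with a dirty cache
 */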

#ifdef CONFIG_MTK_EMMC_HW_CQ
int mmc_discard_queue(struct mmc_host *host, u32 tasks)
{
	struct mmc_command cmd = {};

	cmd.opcode = MMC_CMDQ_TASK_MGMT;
	if (tasks) {
		cmd.arg = DISCARD_TASK;
		cmd.arg |= (tasks << 16);
	} else {
		cmd.arg = DISCARD_QUEUE;
	}

	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;

	return mmc_wait_for_cmd(host, &cmd, 0);
}
EXPORT_SYMBOL(mmc_discard_queue);
#endif

static int mmc_cmdq_switch(struct mmc_card *card, bool enable)
{
#if defined(CONFIG_MTK_EMMC_CQ_SUPPORT) || defined(CONFIG_MTK_EMMC_HW_CQ)
	return mmc_blk_cmdq_switch(card, enable);
#else
	return 0;
#endif
}

int mmc_cmdq_enable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, true);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_enable);

int mmc_cmdq_disable(struct mmc_card *card)
{
	return mmc_cmdq_switch(card, false);
}
EXPORT_SYMBOL_GPL(mmc_cmdq_disable);