block.c

  1. /*
  2. * Block driver for media (i.e., flash cards)
  3. *
  4. * Copyright 2002 Hewlett-Packard Company
  5. * Copyright 2005-2008 Pierre Ossman
  6. *
  7. * Use consistent with the GNU GPL is permitted,
  8. * provided that this copyright notice is
  9. * preserved in its entirety in all copies and derived works.
  10. *
  11. * HEWLETT-PACKARD COMPANY MAKES NO WARRANTIES, EXPRESSED OR IMPLIED,
  12. * AS TO THE USEFULNESS OR CORRECTNESS OF THIS CODE OR ITS
  13. * FITNESS FOR ANY PARTICULAR PURPOSE.
  14. *
  15. * Many thanks to Alessandro Rubini and Jonathan Corbet!
  16. *
  17. * Author: Andrew Christian
  18. * 28 May 2002
  19. */
  20. #include <linux/moduleparam.h>
  21. #include <linux/module.h>
  22. #include <linux/init.h>
  23. #include <linux/kernel.h>
  24. #include <linux/fs.h>
  25. #include <linux/slab.h>
  26. #include <linux/errno.h>
  27. #include <linux/hdreg.h>
  28. #include <linux/kdev_t.h>
  29. #include <linux/blkdev.h>
  30. #include <linux/mutex.h>
  31. #include <linux/scatterlist.h>
  32. #include <linux/bitops.h>
  33. #include <linux/string_helpers.h>
  34. #include <linux/delay.h>
  35. #include <linux/capability.h>
  36. #include <linux/compat.h>
  37. #include <linux/pm_runtime.h>
  38. #include <linux/sysfs.h>
  39. #define CREATE_TRACE_POINTS
  40. #include <trace/events/mmc.h>
  41. #include <linux/mmc/ioctl.h>
  42. #include <linux/mmc/card.h>
  43. #include <linux/mmc/host.h>
  44. #include <linux/mmc/mmc.h>
  45. #include <linux/mmc/sd.h>
  46. #include <asm/uaccess.h>
  47. #include "queue.h"
  48. #include "../core/core.h"
  49. MODULE_ALIAS("mmc:block");
  50. #if defined(CONFIG_MMC_CPRM)
  51. #include "cprmdrv_samsung.h"
  52. #include <linux/ioctl.h>
  53. #define MMC_IOCTL_BASE 0xB3 /* Same as MMC block device major number */
  54. #define MMC_IOCTL_GET_SECTOR_COUNT _IOR(MMC_IOCTL_BASE, 100, int)
  55. #define MMC_IOCTL_GET_SECTOR_SIZE _IOR(MMC_IOCTL_BASE, 101, int)
  56. #define MMC_IOCTL_GET_BLOCK_SIZE _IOR(MMC_IOCTL_BASE, 102, int)
  57. #define MMC_IOCTL_SET_RETRY_AKE_PROCESS _IOR(MMC_IOCTL_BASE, 104, int)
  58. static int cprm_ake_retry_flag;
  59. #endif
  60. #ifdef MODULE_PARAM_PREFIX
  61. #undef MODULE_PARAM_PREFIX
  62. #endif
  63. #define MODULE_PARAM_PREFIX "mmcblk."
  64. #define INAND_CMD38_ARG_EXT_CSD 113
  65. #define INAND_CMD38_ARG_ERASE 0x00
  66. #define INAND_CMD38_ARG_TRIM 0x01
  67. #define INAND_CMD38_ARG_SECERASE 0x80
  68. #define INAND_CMD38_ARG_SECTRIM1 0x81
  69. #define INAND_CMD38_ARG_SECTRIM2 0x88
  70. #define MMC_BLK_TIMEOUT_MS (30 * 1000) /* 30 sec timeout */
  71. #define MMC_SANITIZE_REQ_TIMEOUT 240000 /* msec */
  72. #define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
  73. (req->cmd_flags & REQ_META)) && \
  74. (rq_data_dir(req) == WRITE))
  75. #define PACKED_CMD_VER 0x01
  76. #define PACKED_CMD_WR 0x02
  77. #define PACKED_TRIGGER_MAX_ELEMENTS 5000
  78. #define MMC_BLK_MAX_RETRIES 5 /* max # of retries before aborting a command */
  79. #define MMC_BLK_UPDATE_STOP_REASON(stats, reason) \
  80. do { \
  81. if (stats->enabled) \
  82. stats->pack_stop_reason[reason]++; \
  83. } while (0)
  84. #define PCKD_TRGR_INIT_MEAN_POTEN 17
  85. #define PCKD_TRGR_POTEN_LOWER_BOUND 5
  86. #define PCKD_TRGR_URGENT_PENALTY 2
  87. #define PCKD_TRGR_LOWER_BOUND 5
  88. #define PCKD_TRGR_PRECISION_MULTIPLIER 100
  89. static DEFINE_MUTEX(block_mutex);
  90. /*
  91. * The defaults come from config options but can be overridden by module
  92. * or bootarg options.
  93. */
  94. static int perdev_minors = CONFIG_MMC_BLOCK_MINORS;
  95. /*
  96. * We've only got one major, so number of mmcblk devices is
  97. * limited to 256 / number of minors per device.
  98. */
  99. static int max_devices;
  100. /* 256 minors, so at most 256 separate devices */
  101. static DECLARE_BITMAP(dev_use, 256);
  102. static DECLARE_BITMAP(name_use, 256);
  103. /*
  104. * There is one mmc_blk_data per slot.
  105. */
  106. struct mmc_blk_data {
  107. spinlock_t lock;
  108. struct gendisk *disk;
  109. struct mmc_queue queue;
  110. struct list_head part;
  111. unsigned int flags;
  112. #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
  113. #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
  114. unsigned int usage;
  115. unsigned int read_only;
  116. unsigned int part_type;
  117. unsigned int name_idx;
  118. unsigned int reset_done;
  119. #define MMC_BLK_READ BIT(0)
  120. #define MMC_BLK_WRITE BIT(1)
  121. #define MMC_BLK_DISCARD BIT(2)
  122. #define MMC_BLK_SECDISCARD BIT(3)
  123. /*
  124. * Only set in main mmc_blk_data associated
  125. * with mmc_card with mmc_set_drvdata, and keeps
  126. * track of the currently selected device partition.
  127. */
  128. unsigned int part_curr;
  129. struct device_attribute force_ro;
  130. struct device_attribute power_ro_lock;
  131. struct device_attribute num_wr_reqs_to_start_packing;
  132. struct device_attribute bkops_check_threshold;
  133. struct device_attribute no_pack_for_random;
  134. int area_type;
  135. };
  136. static DEFINE_MUTEX(open_lock);
  137. enum {
  138. MMC_PACKED_N_IDX = -1,
  139. MMC_PACKED_N_ZERO,
  140. MMC_PACKED_N_SINGLE,
  141. };
  142. module_param(perdev_minors, int, 0444);
  143. MODULE_PARM_DESC(perdev_minors, "Number of minors to allocate per device");
  144. static inline int mmc_blk_part_switch(struct mmc_card *card,
  145. struct mmc_blk_data *md);
  146. static int get_card_status(struct mmc_card *card, u32 *status, int retries);
  147. static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
  148. {
  149. mqrq->packed_cmd = MMC_PACKED_NONE;
  150. mqrq->packed_num = MMC_PACKED_N_ZERO;
  151. }
  152. static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
  153. {
  154. struct mmc_blk_data *md;
  155. mutex_lock(&open_lock);
  156. md = disk->private_data;
  157. if (md && md->usage == 0)
  158. md = NULL;
  159. if (md)
  160. md->usage++;
  161. mutex_unlock(&open_lock);
  162. return md;
  163. }
  164. static inline int mmc_get_devidx(struct gendisk *disk)
  165. {
  166. int devidx = disk->first_minor / perdev_minors;
  167. return devidx;
  168. }
  169. static void mmc_blk_put(struct mmc_blk_data *md)
  170. {
  171. mutex_lock(&open_lock);
  172. md->usage--;
  173. if (md->usage == 0) {
  174. int devidx = mmc_get_devidx(md->disk);
  175. blk_cleanup_queue(md->queue.queue);
  176. __clear_bit(devidx, dev_use);
  177. put_disk(md->disk);
  178. kfree(md);
  179. }
  180. mutex_unlock(&open_lock);
  181. }
  182. static ssize_t power_ro_lock_show(struct device *dev,
  183. struct device_attribute *attr, char *buf)
  184. {
  185. int ret;
  186. struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
  187. struct mmc_card *card;
  188. int locked = 0;
  189. if (!md)
  190. return -EINVAL;
  191. card = md->queue.card;
  192. if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
  193. locked = 2;
  194. else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
  195. locked = 1;
  196. ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
  197. mmc_blk_put(md);
  198. return ret;
  199. }
  200. static ssize_t power_ro_lock_store(struct device *dev,
  201. struct device_attribute *attr, const char *buf, size_t count)
  202. {
  203. int ret;
  204. struct mmc_blk_data *md, *part_md;
  205. struct mmc_card *card;
  206. unsigned long set;
  207. if (kstrtoul(buf, 0, &set))
  208. return -EINVAL;
  209. if (set != 1)
  210. return count;
  211. md = mmc_blk_get(dev_to_disk(dev));
  212. if (!md)
  213. return -EINVAL;
  214. card = md->queue.card;
  215. mmc_rpm_hold(card->host, &card->dev);
  216. mmc_claim_host(card->host);
  217. ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
  218. card->ext_csd.boot_ro_lock |
  219. EXT_CSD_BOOT_WP_B_PWR_WP_EN,
  220. card->ext_csd.part_time);
  221. if (ret)
  222. pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
  223. else
  224. card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
  225. mmc_release_host(card->host);
  226. mmc_rpm_release(card->host, &card->dev);
  227. if (!ret) {
  228. pr_info("%s: Locking boot partition ro until next power on\n",
  229. md->disk->disk_name);
  230. set_disk_ro(md->disk, 1);
  231. list_for_each_entry(part_md, &md->part, part)
  232. if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
  233. pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
  234. set_disk_ro(part_md->disk, 1);
  235. }
  236. }
  237. mmc_blk_put(md);
  238. return count;
  239. }
  240. static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
  241. char *buf)
  242. {
  243. int ret;
  244. struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
  245. if (!md)
  246. return -EINVAL;
  247. ret = snprintf(buf, PAGE_SIZE, "%d",
  248. get_disk_ro(dev_to_disk(dev)) ^
  249. md->read_only);
  250. mmc_blk_put(md);
  251. return ret;
  252. }
  253. static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
  254. const char *buf, size_t count)
  255. {
  256. int ret;
  257. char *end;
  258. struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
  259. unsigned long set = simple_strtoul(buf, &end, 0);
  260. if (!md)
  261. return -EINVAL;
  262. if (end == buf) {
  263. ret = -EINVAL;
  264. goto out;
  265. }
  266. set_disk_ro(dev_to_disk(dev), set || md->read_only);
  267. ret = count;
  268. out:
  269. mmc_blk_put(md);
  270. return ret;
  271. }
  272. static ssize_t
  273. num_wr_reqs_to_start_packing_show(struct device *dev,
  274. struct device_attribute *attr, char *buf)
  275. {
  276. struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
  277. int num_wr_reqs_to_start_packing;
  278. int ret;
  279. if (!md)
  280. return -EINVAL;
  281. num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
  282. ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
  283. mmc_blk_put(md);
  284. return ret;
  285. }
  286. static ssize_t
  287. num_wr_reqs_to_start_packing_store(struct device *dev,
  288. struct device_attribute *attr,
  289. const char *buf, size_t count)
  290. {
  291. int value;
  292. struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
  293. struct mmc_card *card;
  294. int ret = count;
  295. if (!md)
  296. return -EINVAL;
  297. card = md->queue.card;
  298. if (!card) {
  299. ret = -EINVAL;
  300. goto exit;
  301. }
  302. sscanf(buf, "%d", &value);
  303. if (value >= 0) {
  304. md->queue.num_wr_reqs_to_start_packing =
  305. min_t(int, value, (int)card->ext_csd.max_packed_writes);
  306. pr_debug("%s: trigger to pack: new value = %d",
  307. mmc_hostname(card->host),
  308. md->queue.num_wr_reqs_to_start_packing);
  309. } else {
  310. pr_err("%s: value %d is not valid. old value remains = %d",
  311. mmc_hostname(card->host), value,
  312. md->queue.num_wr_reqs_to_start_packing);
  313. ret = -EINVAL;
  314. }
  315. exit:
  316. mmc_blk_put(md);
  317. return ret;
  318. }
  319. static ssize_t
  320. bkops_check_threshold_show(struct device *dev,
  321. struct device_attribute *attr, char *buf)
  322. {
  323. struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
  324. struct mmc_card *card;
  325. int ret;
  326. if (!md)
  327. return -EINVAL;
  328. card = md->queue.card;
  329. if (!card)
  330. ret = -EINVAL;
  331. else
  332. ret = snprintf(buf, PAGE_SIZE, "%d\n",
  333. card->bkops_info.size_percentage_to_queue_delayed_work);
  334. mmc_blk_put(md);
  335. return ret;
  336. }
  337. static ssize_t
  338. bkops_check_threshold_store(struct device *dev,
  339. struct device_attribute *attr,
  340. const char *buf, size_t count)
  341. {
  342. int value;
  343. struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
  344. struct mmc_card *card;
  345. unsigned int card_size;
  346. int ret = count;
  347. if (!md)
  348. return -EINVAL;
  349. card = md->queue.card;
  350. if (!card) {
  351. ret = -EINVAL;
  352. goto exit;
  353. }
  354. sscanf(buf, "%d", &value);
  355. if ((value <= 0) || (value >= 100)) {
  356. ret = -EINVAL;
  357. goto exit;
  358. }
  359. card_size = (unsigned int)get_capacity(md->disk);
  360. if (card_size <= 0) {
  361. ret = -EINVAL;
  362. goto exit;
  363. }
  364. card->bkops_info.size_percentage_to_queue_delayed_work = value;
  365. card->bkops_info.min_sectors_to_queue_delayed_work =
  366. (card_size * value) / 100;
  367. pr_debug("%s: size_percentage = %d, min_sectors = %d",
  368. mmc_hostname(card->host),
  369. card->bkops_info.size_percentage_to_queue_delayed_work,
  370. card->bkops_info.min_sectors_to_queue_delayed_work);
  371. exit:
  372. mmc_blk_put(md);
  374. return ret;
  374. }
  375. static ssize_t
  376. no_pack_for_random_show(struct device *dev,
  377. struct device_attribute *attr, char *buf)
  378. {
  379. struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
  380. int ret;
  381. if (!md)
  382. return -EINVAL;
  383. ret = snprintf(buf, PAGE_SIZE, "%d\n", md->queue.no_pack_for_random);
  384. mmc_blk_put(md);
  385. return ret;
  386. }
  387. static ssize_t
  388. no_pack_for_random_store(struct device *dev,
  389. struct device_attribute *attr,
  390. const char *buf, size_t count)
  391. {
  392. int value;
  393. struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
  394. struct mmc_card *card;
  395. int ret = count;
  396. if (!md)
  397. return -EINVAL;
  398. card = md->queue.card;
  399. if (!card) {
  400. ret = -EINVAL;
  401. goto exit;
  402. }
  403. sscanf(buf, "%d", &value);
  404. if (value < 0) {
  405. pr_err("%s: value %d is not valid. old value remains = %d",
  406. mmc_hostname(card->host), value,
  407. md->queue.no_pack_for_random);
  408. ret = -EINVAL;
  409. goto exit;
  410. }
  411. md->queue.no_pack_for_random = (value > 0) ? true : false;
  412. pr_debug("%s: no_pack_for_random: new value = %d",
  413. mmc_hostname(card->host),
  414. md->queue.no_pack_for_random);
  415. exit:
  416. mmc_blk_put(md);
  417. return ret;
  418. }
  419. static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
  420. {
  421. struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
  422. int ret = -ENXIO;
  423. mutex_lock(&block_mutex);
  424. if (md) {
  425. if (md->usage == 2)
  426. check_disk_change(bdev);
  427. ret = 0;
  428. if ((mode & FMODE_WRITE) && md->read_only) {
  429. mmc_blk_put(md);
  430. ret = -EROFS;
  431. }
  432. }
  433. mutex_unlock(&block_mutex);
  434. return ret;
  435. }
  436. static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
  437. {
  438. struct mmc_blk_data *md = disk->private_data;
  439. mutex_lock(&block_mutex);
  440. mmc_blk_put(md);
  441. mutex_unlock(&block_mutex);
  442. return 0;
  443. }
  444. static int
  445. mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  446. {
  447. geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
  448. geo->heads = 4;
  449. geo->sectors = 16;
  450. return 0;
  451. }
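/*
 * Example: with a hypothetical card of 15,269,888 512-byte sectors (~7.3 GiB),
 * the fake geometry above reports 4 heads, 16 sectors per track and
 * get_capacity() / (4 * 16) = 238,592 cylinders. The numbers exist only to
 * satisfy legacy HDIO_GETGEO callers; they have no physical meaning for
 * flash media.
 */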
  452. struct mmc_blk_ioc_data {
  453. struct mmc_ioc_cmd ic;
  454. unsigned char *buf;
  455. u64 buf_bytes;
  456. };
  457. static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
  458. struct mmc_ioc_cmd __user *user)
  459. {
  460. struct mmc_blk_ioc_data *idata;
  461. int err;
  462. idata = kzalloc(sizeof(*idata), GFP_KERNEL);
  463. if (!idata) {
  464. err = -ENOMEM;
  465. goto out;
  466. }
  467. if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
  468. err = -EFAULT;
  469. goto idata_err;
  470. }
  471. idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
  472. if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
  473. err = -EOVERFLOW;
  474. goto idata_err;
  475. }
  476. if (!idata->buf_bytes)
  477. return idata;
  478. idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
  479. if (!idata->buf) {
  480. err = -ENOMEM;
  481. goto idata_err;
  482. }
  483. if (copy_from_user(idata->buf, (void __user *)(unsigned long)
  484. idata->ic.data_ptr, idata->buf_bytes)) {
  485. err = -EFAULT;
  486. goto copy_err;
  487. }
  488. return idata;
  489. copy_err:
  490. kfree(idata->buf);
  491. idata_err:
  492. kfree(idata);
  493. out:
  494. return ERR_PTR(err);
  495. }
  496. struct scatterlist *mmc_blk_get_sg(struct mmc_card *card,
  497. unsigned char *buf, int *sg_len, int size)
  498. {
  499. struct scatterlist *sg;
  500. struct scatterlist *sl;
  501. int total_sec_cnt, sec_cnt;
  502. int max_seg_size, len;
  503. total_sec_cnt = size;
  504. max_seg_size = card->host->max_seg_size;
  505. len = (size - 1 + max_seg_size) / max_seg_size;
  506. sl = kmalloc(sizeof(struct scatterlist) * len, GFP_KERNEL);
  507. if (!sl) {
  508. return NULL;
  509. }
  510. sg = (struct scatterlist *)sl;
  511. sg_init_table(sg, len);
  512. while (total_sec_cnt) {
  513. if (total_sec_cnt < max_seg_size)
  514. sec_cnt = total_sec_cnt;
  515. else
  516. sec_cnt = max_seg_size;
  517. sg_set_page(sg, virt_to_page(buf), sec_cnt, offset_in_page(buf));
  518. buf = buf + sec_cnt;
  519. total_sec_cnt = total_sec_cnt - sec_cnt;
  520. if (total_sec_cnt == 0)
  521. break;
  522. sg = sg_next(sg);
  523. }
  524. if (sg)
  525. sg_mark_end(sg);
  526. *sg_len = len;
  527. return sl;
  528. }
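/*
 * Example: mmc_blk_get_sg() above chops one virtually contiguous buffer into
 * host-sized chunks. Assuming a hypothetical host with max_seg_size = 65536
 * and a 150000-byte buffer, len = (150000 - 1 + 65536) / 65536 = 3, and the
 * scatterlist carries entries of 65536, 65536 and 18928 bytes, with
 * sg_mark_end() terminating the last entry.
 */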
  529. static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
  530. u32 retries_max)
  531. {
  532. int err;
  533. u32 retry_count = 0;
  534. if (!status || !retries_max)
  535. return -EINVAL;
  536. do {
  537. err = get_card_status(card, status, 5);
  538. if (err)
  539. break;
  540. if (!R1_STATUS(*status) &&
  541. (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
  542. break; /* RPMB programming operation complete */
  543. /*
  544. * Reschedule to give the MMC device a chance to continue
  545. * processing the previous command without being polled too
  546. * frequently.
  547. */
  548. usleep_range(1000, 5000);
  549. } while (++retry_count < retries_max);
  550. if (retry_count == retries_max)
  551. err = -EPERM;
  552. return err;
  553. }
  554. static int mmc_blk_ioctl_cmd(struct block_device *bdev,
  555. struct mmc_ioc_cmd __user *ic_ptr)
  556. {
  557. struct mmc_blk_ioc_data *idata;
  558. struct mmc_blk_data *md;
  559. struct mmc_card *card;
  560. struct mmc_command cmd = {0};
  561. struct mmc_data data = {0};
  562. struct mmc_request mrq = {NULL};
  563. struct scatterlist *sg = NULL;
  564. int err = 0;
  565. int is_rpmb = false;
  566. u32 status = 0;
  567. /*
  568. * The caller must have CAP_SYS_RAWIO, and must be calling this on the
  569. * whole block device, not on a partition. This prevents overspray
  570. * between sibling partitions.
  571. */
  572. if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
  573. return -EPERM;
  574. idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
  575. if (IS_ERR(idata))
  576. return PTR_ERR(idata);
  577. md = mmc_blk_get(bdev->bd_disk);
  578. if (!md) {
  579. err = -EINVAL;
  580. goto blk_err;
  581. }
  582. if (md->area_type & MMC_BLK_DATA_AREA_RPMB) {
  583. is_rpmb = true;
  584. }
  585. card = md->queue.card;
  586. if (IS_ERR(card)) {
  587. err = PTR_ERR(card);
  588. goto cmd_done;
  589. }
  590. cmd.opcode = idata->ic.opcode;
  591. cmd.arg = idata->ic.arg;
  592. cmd.flags = idata->ic.flags;
  593. if (idata->buf_bytes) {
  594. int len;
  595. data.blksz = idata->ic.blksz;
  596. data.blocks = idata->ic.blocks;
  597. sg = mmc_blk_get_sg(card, idata->buf, &len, idata->buf_bytes);
  598. data.sg = sg;
  599. data.sg_len = len;
  600. if (idata->ic.write_flag)
  601. data.flags = MMC_DATA_WRITE;
  602. else
  603. data.flags = MMC_DATA_READ;
  604. /* data.flags must already be set before doing this. */
  605. mmc_set_data_timeout(&data, card);
  606. /* Allow overriding the timeout_ns for empirical tuning. */
  607. if (idata->ic.data_timeout_ns)
  608. data.timeout_ns = idata->ic.data_timeout_ns;
  609. if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
  610. /*
  611. * Pretend this is a data transfer and rely on the
  612. * host driver to compute timeout. When all host
  613. * drivers support cmd.cmd_timeout for R1B, this
  614. * can be changed to:
  615. *
  616. * mrq.data = NULL;
  617. * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
  618. */
  619. data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
  620. }
  621. mrq.data = &data;
  622. }
  623. mrq.cmd = &cmd;
  624. mmc_rpm_hold(card->host, &card->dev);
  625. mmc_claim_host(card->host);
  626. err = mmc_blk_part_switch(card, md);
  627. if (err)
  628. goto cmd_rel_host;
  629. if (idata->ic.is_acmd) {
  630. err = mmc_app_cmd(card->host, card);
  631. if (err)
  632. goto cmd_rel_host;
  633. }
  634. if (is_rpmb) {
  635. err = mmc_set_blockcount(card, data.blocks,
  636. idata->ic.write_flag & (1 << 31));
  637. if (err)
  638. goto cmd_rel_host;
  639. }
  640. mmc_wait_for_req(card->host, &mrq);
  641. if (cmd.error) {
  642. dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
  643. __func__, cmd.error);
  644. err = cmd.error;
  645. goto cmd_rel_host;
  646. }
  647. if (data.error) {
  648. dev_err(mmc_dev(card->host), "%s: data error %d\n",
  649. __func__, data.error);
  650. err = data.error;
  651. goto cmd_rel_host;
  652. }
  653. /*
  654. * According to the SD specs, some commands require a delay after
  655. * issuing the command.
  656. */
  657. if (idata->ic.postsleep_min_us)
  658. usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
  659. if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
  660. err = -EFAULT;
  661. goto cmd_rel_host;
  662. }
  663. if (!idata->ic.write_flag) {
  664. if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
  665. idata->buf, idata->buf_bytes)) {
  666. err = -EFAULT;
  667. goto cmd_rel_host;
  668. }
  669. }
  670. if (is_rpmb) {
  671. /*
  672. * Ensure RPMB command has completed by polling CMD13
  673. * "Send Status".
  674. */
  675. err = ioctl_rpmb_card_status_poll(card, &status, 5);
  676. if (err)
  677. dev_err(mmc_dev(card->host), "%s: Card Status=0x%08X, error %d\n", __func__, status, err);
  678. }
  679. cmd_rel_host:
  680. mmc_release_host(card->host);
  681. mmc_rpm_release(card->host, &card->dev);
  682. cmd_done:
  683. mmc_blk_put(md);
  684. blk_err:
  685. if (sg)
  686. kfree(sg);
  687. kfree(idata->buf);
  688. kfree(idata);
  689. return err;
  690. }
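/*
 * Illustrative userspace sketch of the MMC_IOC_CMD path handled above: a
 * caller reading the 512-byte EXT_CSD register of an eMMC device. This is a
 * minimal sketch assuming the UAPI in <linux/mmc/ioctl.h> (struct mmc_ioc_cmd,
 * MMC_IOC_CMD, mmc_ioc_cmd_set_data()); the opcode and flags are kernel
 * constants that userspace normally copies by hand (written numerically
 * here), and the ioctl must be issued by a CAP_SYS_RAWIO caller on the whole
 * block device, per the check at the top of mmc_blk_ioctl_cmd().
 *
 *	#include <string.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/mmc/ioctl.h>
 *
 *	static int read_ext_csd(const char *dev, __u8 ext_csd[512])
 *	{
 *		struct mmc_ioc_cmd ic;
 *		int fd = open(dev, O_RDWR);	// e.g. "/dev/mmcblk0"
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ic, 0, sizeof(ic));
 *		ic.opcode = 8;			// MMC_SEND_EXT_CSD
 *		ic.arg = 0;
 *		ic.flags = 0x35;		// MMC_RSP_R1 | MMC_CMD_ADTC
 *		ic.blksz = 512;
 *		ic.blocks = 1;
 *		ic.write_flag = 0;		// read: data is copied back to data_ptr
 *		mmc_ioc_cmd_set_data(ic, ext_csd);
 *		if (ioctl(fd, MMC_IOC_CMD, &ic) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */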
  691. struct mmc_blk_ioc_rpmb_data {
  692. struct mmc_blk_ioc_data *data[MMC_IOC_MAX_RPMB_CMD];
  693. };
  694. static struct mmc_blk_ioc_rpmb_data *mmc_blk_ioctl_rpmb_copy_from_user(
  695. struct mmc_ioc_rpmb __user *user)
  696. {
  697. struct mmc_blk_ioc_rpmb_data *idata;
  698. int err, i;
  699. idata = kzalloc(sizeof(*idata), GFP_KERNEL);
  700. if (!idata) {
  701. err = -ENOMEM;
  702. goto out;
  703. }
  704. for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
  705. idata->data[i] = mmc_blk_ioctl_copy_from_user(&(user->cmds[i]));
  706. if (IS_ERR(idata->data[i])) {
  707. err = PTR_ERR(idata->data[i]);
  708. goto copy_err;
  709. }
  710. }
  711. return idata;
  712. copy_err:
  713. while (--i >= 0) {
  714. kfree(idata->data[i]->buf);
  715. kfree(idata->data[i]);
  716. }
  717. kfree(idata);
  718. out:
  719. return ERR_PTR(err);
  720. }
  721. static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
  722. struct mmc_ioc_rpmb __user *ic_ptr)
  723. {
  724. struct mmc_blk_ioc_rpmb_data *idata;
  725. struct mmc_blk_data *md;
  726. struct mmc_card *card;
  727. struct mmc_command cmd = {0};
  728. struct mmc_data data = {0};
  729. struct mmc_request mrq = {NULL};
  730. struct scatterlist sg;
  731. int err = 0, i = 0;
  732. u32 status = 0;
  733. /* The caller must have CAP_SYS_RAWIO */
  734. if (!capable(CAP_SYS_RAWIO))
  735. return -EPERM;
  736. md = mmc_blk_get(bdev->bd_disk);
  737. /* make sure this is a rpmb partition */
  738. if ((!md) || (!(md->area_type & MMC_BLK_DATA_AREA_RPMB))) {
  739. err = -EINVAL;
  740. return err;
  741. }
  742. idata = mmc_blk_ioctl_rpmb_copy_from_user(ic_ptr);
  743. if (IS_ERR(idata)) {
  744. err = PTR_ERR(idata);
  745. goto cmd_done;
  746. }
  747. card = md->queue.card;
  748. if (IS_ERR(card)) {
  749. err = PTR_ERR(card);
  750. goto idata_free;
  751. }
  752. mmc_rpm_hold(card->host, &card->dev);
  753. mmc_claim_host(card->host);
  754. err = mmc_blk_part_switch(card, md);
  755. if (err)
  756. goto cmd_rel_host;
  757. for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
  758. struct mmc_blk_ioc_data *curr_data;
  759. struct mmc_ioc_cmd *curr_cmd;
  760. curr_data = idata->data[i];
  761. curr_cmd = &curr_data->ic;
  762. if (!curr_cmd->opcode)
  763. break;
  764. cmd.opcode = curr_cmd->opcode;
  765. cmd.arg = curr_cmd->arg;
  766. cmd.flags = curr_cmd->flags;
  767. if (curr_data->buf_bytes) {
  768. data.sg = &sg;
  769. data.sg_len = 1;
  770. data.blksz = curr_cmd->blksz;
  771. data.blocks = curr_cmd->blocks;
  772. sg_init_one(data.sg, curr_data->buf,
  773. curr_data->buf_bytes);
  774. if (curr_cmd->write_flag)
  775. data.flags = MMC_DATA_WRITE;
  776. else
  777. data.flags = MMC_DATA_READ;
  778. /* data.flags must already be set before doing this. */
  779. mmc_set_data_timeout(&data, card);
  780. /*
  781. * Allow overriding the timeout_ns for empirical tuning.
  782. */
  783. if (curr_cmd->data_timeout_ns)
  784. data.timeout_ns = curr_cmd->data_timeout_ns;
  785. mrq.data = &data;
  786. }
  787. mrq.cmd = &cmd;
  788. err = mmc_set_blockcount(card, data.blocks,
  789. curr_cmd->write_flag & (1 << 31));
  790. if (err)
  791. goto cmd_rel_host;
  792. mmc_wait_for_req(card->host, &mrq);
  793. if (cmd.error) {
  794. dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
  795. __func__, cmd.error);
  796. err = cmd.error;
  797. goto cmd_rel_host;
  798. }
  799. if (data.error) {
  800. dev_err(mmc_dev(card->host), "%s: data error %d\n",
  801. __func__, data.error);
  802. err = data.error;
  803. goto cmd_rel_host;
  804. }
  805. if (copy_to_user(&(ic_ptr->cmds[i].response), cmd.resp,
  806. sizeof(cmd.resp))) {
  807. err = -EFAULT;
  808. goto cmd_rel_host;
  809. }
  810. if (!curr_cmd->write_flag) {
  811. if (copy_to_user((void __user *)(unsigned long)
  812. curr_cmd->data_ptr,
  813. curr_data->buf,
  814. curr_data->buf_bytes)) {
  815. err = -EFAULT;
  816. goto cmd_rel_host;
  817. }
  818. }
  819. /*
  820. * Ensure RPMB command has completed by polling CMD13
  821. * "Send Status".
  822. */
  823. err = ioctl_rpmb_card_status_poll(card, &status, 5);
  824. if (err)
  825. dev_err(mmc_dev(card->host),
  826. "%s: Card Status=0x%08X, error %d\n",
  827. __func__, status, err);
  828. }
  829. cmd_rel_host:
  830. mmc_release_host(card->host);
  831. mmc_rpm_release(card->host, &card->dev);
  832. idata_free:
  833. for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
  834. kfree(idata->data[i]->buf);
  835. kfree(idata->data[i]);
  836. }
  837. kfree(idata);
  838. cmd_done:
  839. mmc_blk_put(md);
  840. return err;
  841. }
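/*
 * Illustrative userspace sketch of the MMC_IOC_RPMB_CMD path handled above,
 * using the same <linux/mmc/ioctl.h> helpers as the previous sketch. The
 * ioctl takes a struct mmc_ioc_rpmb whose cmds[] array is processed in order
 * until the first entry with a zero opcode, so unused slots can be left
 * zeroed. A typical exchange writes one 512-byte RPMB request frame with
 * CMD25 and reads the 512-byte response frame back with CMD18; building the
 * frames themselves (nonce, MAC, request type) is defined by the eMMC RPMB
 * specification and is not shown here.
 *
 *	struct mmc_ioc_rpmb rpmb;
 *	__u8 req_frame[512], resp_frame[512];	// caller-built RPMB frames
 *
 *	memset(&rpmb, 0, sizeof(rpmb));
 *	rpmb.cmds[0].opcode = 25;		// CMD25: WRITE_MULTIPLE_BLOCK
 *	rpmb.cmds[0].flags = 0x35;		// MMC_RSP_R1 | MMC_CMD_ADTC
 *	rpmb.cmds[0].write_flag = 1;
 *	rpmb.cmds[0].blksz = 512;
 *	rpmb.cmds[0].blocks = 1;
 *	mmc_ioc_cmd_set_data(rpmb.cmds[0], req_frame);
 *	rpmb.cmds[1].opcode = 18;		// CMD18: READ_MULTIPLE_BLOCK
 *	rpmb.cmds[1].flags = 0x35;
 *	rpmb.cmds[1].write_flag = 0;
 *	rpmb.cmds[1].blksz = 512;
 *	rpmb.cmds[1].blocks = 1;
 *	mmc_ioc_cmd_set_data(rpmb.cmds[1], resp_frame);
 *	ioctl(fd, MMC_IOC_RPMB_CMD, &rpmb);	// fd: the RPMB block device, root only
 */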
  842. static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
  843. unsigned int cmd, unsigned long arg)
  844. {
  845. struct mmc_blk_data *md = bdev->bd_disk->private_data;
  846. struct mmc_card *card = md->queue.card;
  847. int ret = -EINVAL;
  848. #if defined(CONFIG_MMC_CPRM)
  849. printk(KERN_DEBUG "%s: cmd %x\n", __func__, cmd);
  850. switch (cmd) {
  851. case MMC_IOCTL_SET_RETRY_AKE_PROCESS:
  852. cprm_ake_retry_flag = 1;
  853. ret = 0;
  854. break;
  855. case MMC_IOCTL_GET_SECTOR_COUNT: {
  856. int size = 0;
  857. size = (int)get_capacity(md->disk) << 9;
  858. printk(KERN_DEBUG "[%s]:MMC_IOCTL_GET_SECTOR_COUNT size = %d\n",
  859. __func__, size);
  860. return copy_to_user((void __user *)arg, &size, sizeof(size)) ? -EFAULT : 0;
  861. }
  862. break;
  863. case ACMD13:
  864. case ACMD18:
  865. case ACMD25:
  866. case ACMD43:
  867. case ACMD44:
  868. case ACMD45:
  869. case ACMD46:
  870. case ACMD47:
  871. case ACMD48: {
  872. struct cprm_request *req = (struct cprm_request *)arg;
  873. static int i;
  874. static unsigned long temp_arg[16] = {0};
  875. printk(KERN_DEBUG "%s:cmd [%x]\n",
  876. __func__, cmd);
  877. if (cmd == ACMD43) {
  878. printk(KERN_DEBUG"storing acmd43 arg[%d] = %ul\n",
  879. i, (unsigned int)req->arg);
  880. temp_arg[i] = req->arg;
  881. i++;
  882. if (i >= 16) {
  883. printk(KERN_DEBUG"reset acmd43 i = %d\n", i);
  884. i = 0;
  885. }
  886. }
  887. if (cmd == ACMD45 && cprm_ake_retry_flag == 1) {
  888. cprm_ake_retry_flag = 0;
  889. printk(KERN_DEBUG"ACMD45.. I'll call ACMD43 and ACMD44 first\n");
  890. for (i = 0; i < 16; i++) {
  891. printk(KERN_DEBUG"calling ACMD43 with arg[%d] = %ul\n",
  892. i, (unsigned int)temp_arg[i]);
  893. if (stub_sendcmd(card, ACMD43, temp_arg[i],
  894. 512, NULL) < 0) {
  895. printk(KERN_DEBUG"error ACMD43 %d\n",
  896. i);
  897. return -EINVAL;
  898. }
  899. }
  900. printk(KERN_DEBUG"calling ACMD44\n");
  901. if (stub_sendcmd(card, ACMD44, 0, 8, NULL) < 0) {
  902. printk(KERN_DEBUG"error in ACMD44 %d\n",
  903. i);
  904. return -EINVAL;
  905. }
  906. }
  907. return stub_sendcmd(card, req->cmd,
  908. req->arg, req->len, req->buff);
  909. }
  910. break;
  911. default:
  912. printk(KERN_DEBUG"%s: Invalid ioctl command\n", __func__);
  913. break;
  914. }
  915. #endif
  916. if (cmd == MMC_IOC_CMD)
  917. ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
  918. else if (cmd == MMC_IOC_RPMB_CMD)
  919. ret = mmc_blk_ioctl_rpmb_cmd(bdev,
  920. (struct mmc_ioc_rpmb __user *)arg);
  921. else if (cmd == MMC_IOC_CLOCK)
  922. {
  923. unsigned int clock = (unsigned int)arg;
  924. if (clock < card->host->f_min)
  925. clock = card->host->f_min;
  926. mmc_set_clock(card->host, clock);
  927. printk(KERN_DEBUG "MMC_IOC_CLOCK : %uhz\n", clock);
  928. ret = 0;
  929. }
  930. else if (cmd == MMC_IOC_BUSWIDTH)
  931. {
  932. unsigned int width = (unsigned int)arg;
  933. mmc_set_bus_width(card->host, width);
  934. printk(KERN_DEBUG "MMC_IOC_BUSWIDTH : %u\n", width);
  935. ret = 0;
  936. }
  937. return ret;
  938. }
  939. #ifdef CONFIG_COMPAT
  940. static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
  941. unsigned int cmd, unsigned long arg)
  942. {
  943. return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
  944. }
  945. #endif
  946. static const struct block_device_operations mmc_bdops = {
  947. .open = mmc_blk_open,
  948. .release = mmc_blk_release,
  949. .getgeo = mmc_blk_getgeo,
  950. .owner = THIS_MODULE,
  951. .ioctl = mmc_blk_ioctl,
  952. #ifdef CONFIG_COMPAT
  953. .compat_ioctl = mmc_blk_compat_ioctl,
  954. #endif
  955. };
  956. static inline int mmc_blk_part_switch(struct mmc_card *card,
  957. struct mmc_blk_data *md)
  958. {
  959. int ret;
  960. struct mmc_blk_data *main_md = mmc_get_drvdata(card);
  961. if ((main_md->part_curr == md->part_type) &&
  962. (card->part_curr == md->part_type))
  963. return 0;
  964. if (mmc_card_mmc(card)) {
  965. u8 part_config = card->ext_csd.part_config;
  966. part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
  967. part_config |= md->part_type;
  968. ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  969. EXT_CSD_PART_CONFIG, part_config,
  970. card->ext_csd.part_time);
  971. if (ret)
  972. return ret;
  973. card->ext_csd.part_config = part_config;
  974. card->part_curr = md->part_type;
  975. }
  976. main_md->part_curr = md->part_type;
  977. return 0;
  978. }
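/*
 * Example: the switch above rewrites only the PARTITION_ACCESS bits of
 * EXT_CSD[179] (PARTITION_CONFIG). Assuming a hypothetical card with
 * part_config = 0x48 (boot ack + boot partition 1 enabled, user area
 * selected), selecting the RPMB partition (part_type = 0x3) masks off
 * EXT_CSD_PART_CONFIG_ACC_MASK and writes back 0x4B, leaving the boot-enable
 * bits untouched.
 */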
  979. static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
  980. {
  981. int err;
  982. u32 result;
  983. __be32 *blocks;
  984. struct mmc_request mrq = {NULL};
  985. struct mmc_command cmd = {0};
  986. struct mmc_data data = {0};
  987. struct scatterlist sg;
  988. cmd.opcode = MMC_APP_CMD;
  989. cmd.arg = card->rca << 16;
  990. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
  991. err = mmc_wait_for_cmd(card->host, &cmd, 0);
  992. if (err)
  993. return (u32)-1;
  994. if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
  995. return (u32)-1;
  996. memset(&cmd, 0, sizeof(struct mmc_command));
  997. cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
  998. cmd.arg = 0;
  999. cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
  1000. data.blksz = 4;
  1001. data.blocks = 1;
  1002. data.flags = MMC_DATA_READ;
  1003. data.sg = &sg;
  1004. data.sg_len = 1;
  1005. mmc_set_data_timeout(&data, card);
  1006. mrq.cmd = &cmd;
  1007. mrq.data = &data;
  1008. blocks = kmalloc(4, GFP_KERNEL);
  1009. if (!blocks)
  1010. return (u32)-1;
  1011. sg_init_one(&sg, blocks, 4);
  1012. mmc_wait_for_req(card->host, &mrq);
  1013. result = ntohl(*blocks);
  1014. kfree(blocks);
  1015. if (cmd.error || data.error)
  1016. result = (u32)-1;
  1017. return result;
  1018. }
  1019. static int send_stop(struct mmc_card *card, u32 *status)
  1020. {
  1021. struct mmc_command cmd = {0};
  1022. int err;
  1023. cmd.opcode = MMC_STOP_TRANSMISSION;
  1024. cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
  1025. err = mmc_wait_for_cmd(card->host, &cmd, 5);
  1026. if (err == 0)
  1027. *status = cmd.resp[0];
  1028. return err;
  1029. }
  1030. static int get_card_status(struct mmc_card *card, u32 *status, int retries)
  1031. {
  1032. struct mmc_command cmd = {0};
  1033. int err;
  1034. cmd.opcode = MMC_SEND_STATUS;
  1035. if (!mmc_host_is_spi(card->host))
  1036. cmd.arg = card->rca << 16;
  1037. cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
  1038. err = mmc_wait_for_cmd(card->host, &cmd, retries);
  1039. if (err == 0)
  1040. *status = cmd.resp[0];
  1041. return err;
  1042. }
  1043. #define ERR_NOMEDIUM 3
  1044. #define ERR_RETRY 2
  1045. #define ERR_ABORT 1
  1046. #define ERR_CONTINUE 0
  1047. static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
  1048. bool status_valid, u32 status)
  1049. {
  1050. switch (error) {
  1051. case -EILSEQ:
  1052. /* response crc error, retry the r/w cmd */
  1053. pr_err("%s: %s sending %s command, card status %#x\n",
  1054. req->rq_disk->disk_name, "response CRC error",
  1055. name, status);
  1056. return ERR_RETRY;
  1057. case -ETIMEDOUT:
  1058. pr_err("%s: %s sending %s command, card status %#x\n",
  1059. req->rq_disk->disk_name, "timed out", name, status);
  1060. /* If the status cmd initially failed, retry the r/w cmd */
  1061. if (!status_valid) {
  1062. pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
  1063. return ERR_RETRY;
  1064. }
  1065. /*
  1066. * If it was a r/w cmd crc error, or illegal command
  1067. * (eg, issued in wrong state) then retry - we should
  1068. * have corrected the state problem above.
  1069. */
  1070. if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
  1071. pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
  1072. return ERR_RETRY;
  1073. }
  1074. /* Otherwise abort the command */
  1075. pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
  1076. return ERR_ABORT;
  1077. default:
  1078. /* We don't understand the error code the driver gave us */
  1079. pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
  1080. req->rq_disk->disk_name, error, status);
  1081. return ERR_ABORT;
  1082. }
  1083. }
  1084. /*
  1085. * Initial r/w and stop cmd error recovery.
  1086. * We don't know whether the card received the r/w cmd or not, so try to
  1087. * restore things back to a sane state. Essentially, we do this as follows:
  1088. * - Obtain card status. If the first attempt to obtain card status fails,
  1089. * the status word will reflect the failed status cmd, not the failed
  1090. * r/w cmd. If we fail to obtain card status, it suggests we can no
  1091. * longer communicate with the card.
  1092. * - Check the card state. If the card received the cmd but there was a
  1093. * transient problem with the response, it might still be in a data transfer
  1094. * mode. Try to send it a stop command. If this fails, we can't recover.
  1095. * - If the r/w cmd failed due to a response CRC error, it was probably
  1096. * transient, so retry the cmd.
  1097. * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
  1098. * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
  1099. * illegal cmd, retry.
  1100. * Otherwise we don't understand what happened, so abort.
  1101. */
  1102. static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
  1103. struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
  1104. {
  1105. bool prev_cmd_status_valid = true;
  1106. u32 status, stop_status = 0;
  1107. int err, retry;
  1108. if (mmc_card_removed(card))
  1109. return ERR_NOMEDIUM;
  1110. /*
  1111. * Try to get card status which indicates both the card state
  1112. * and why there was no response. If the first attempt fails,
  1113. * we can't be sure the returned status is for the r/w command.
  1114. */
  1115. for (retry = 2; retry >= 0; retry--) {
  1116. err = get_card_status(card, &status, 0);
  1117. if (!err)
  1118. break;
  1119. prev_cmd_status_valid = false;
  1120. pr_err("%s: error %d sending status command, %sing\n",
  1121. req->rq_disk->disk_name, err, retry ? "retry" : "abort");
  1122. }
  1123. /* We couldn't get a response from the card. Give up. */
  1124. if (err) {
  1125. /* Check if the card is removed */
  1126. if (mmc_detect_card_removed(card->host))
  1127. return ERR_NOMEDIUM;
  1128. return ERR_ABORT;
  1129. }
  1130. /* Flag ECC errors */
  1131. if ((status & R1_CARD_ECC_FAILED) ||
  1132. (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
  1133. (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
  1134. *ecc_err = 1;
  1135. /* Flag General errors */
  1136. if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
  1137. if ((status & R1_ERROR) ||
  1138. (brq->stop.resp[0] & R1_ERROR)) {
  1139. pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
  1140. req->rq_disk->disk_name, __func__,
  1141. brq->stop.resp[0], status);
  1142. *gen_err = 1;
  1143. }
  1144. /*
  1145. * Check the current card state. If it is in some data transfer
  1146. * mode, tell it to stop (and hopefully transition back to TRAN.)
  1147. */
  1148. if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
  1149. R1_CURRENT_STATE(status) == R1_STATE_RCV) {
  1150. err = send_stop(card, &stop_status);
  1151. if (err)
  1152. pr_err("%s: error %d sending stop command\n",
  1153. req->rq_disk->disk_name, err);
  1154. /*
  1155. * If the stop cmd also timed out, the card is probably
  1156. * not present, so abort. Other errors are bad news too.
  1157. */
  1158. if (err)
  1159. return ERR_ABORT;
  1160. if (stop_status & R1_CARD_ECC_FAILED)
  1161. *ecc_err = 1;
  1162. if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
  1163. if (stop_status & R1_ERROR) {
  1164. pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
  1165. req->rq_disk->disk_name, __func__,
  1166. stop_status);
  1167. *gen_err = 1;
  1168. }
  1169. }
  1170. /* Check for set block count errors */
  1171. if (brq->sbc.error)
  1172. return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
  1173. prev_cmd_status_valid, status);
  1174. /* Check for r/w command errors */
  1175. if (brq->cmd.error)
  1176. return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
  1177. prev_cmd_status_valid, status);
  1178. /* Data errors */
  1179. if (!brq->stop.error)
  1180. return ERR_CONTINUE;
  1181. /* Now for stop errors. These aren't fatal to the transfer. */
  1182. pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
  1183. req->rq_disk->disk_name, brq->stop.error,
  1184. brq->cmd.resp[0], status);
  1185. /*
  1186. * Substitute in our own stop status as this will give the error
  1187. * state which happened during the execution of the r/w command.
  1188. */
  1189. if (stop_status) {
  1190. brq->stop.resp[0] = stop_status;
  1191. brq->stop.error = 0;
  1192. }
  1193. return ERR_CONTINUE;
  1194. }
  1195. static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
  1196. int type)
  1197. {
  1198. int err;
  1199. if (md->reset_done & type)
  1200. return -EEXIST;
  1201. md->reset_done |= type;
  1202. err = mmc_hw_reset(host);
  1203. /* Ensure we switch back to the correct partition */
  1204. if (err != -EOPNOTSUPP) {
  1205. struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
  1206. int part_err;
  1207. main_md->part_curr = main_md->part_type;
  1208. part_err = mmc_blk_part_switch(host->card, md);
  1209. if (part_err) {
  1210. /*
  1211. * We have failed to get back into the correct
  1212. * partition, so we need to abort the whole request.
  1213. */
  1214. return -ENODEV;
  1215. }
  1216. }
  1217. return err;
  1218. }
  1219. static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
  1220. {
  1221. md->reset_done &= ~type;
  1222. }
  1223. static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
  1224. {
  1225. struct mmc_blk_data *md = mq->data;
  1226. struct mmc_card *card = md->queue.card;
  1227. unsigned int from, nr, arg;
  1228. int err = 0, type = MMC_BLK_DISCARD;
  1229. if (!mmc_can_erase(card)) {
  1230. err = -EOPNOTSUPP;
  1231. goto out;
  1232. }
  1233. from = blk_rq_pos(req);
  1234. nr = blk_rq_sectors(req);
  1235. if (card->ext_csd.bkops_en)
  1236. card->bkops_info.sectors_changed += blk_rq_sectors(req);
  1237. if (mmc_can_discard(card))
  1238. arg = MMC_DISCARD_ARG;
  1239. else if (mmc_can_trim(card))
  1240. arg = MMC_TRIM_ARG;
  1241. else
  1242. arg = MMC_ERASE_ARG;
  1243. retry:
  1244. if (card->quirks & MMC_QUIRK_INAND_CMD38) {
  1245. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  1246. INAND_CMD38_ARG_EXT_CSD,
  1247. arg == MMC_TRIM_ARG ?
  1248. INAND_CMD38_ARG_TRIM :
  1249. INAND_CMD38_ARG_ERASE,
  1250. 0);
  1251. if (err)
  1252. goto out;
  1253. }
  1254. err = mmc_erase(card, from, nr, arg);
  1255. out:
  1256. if (err == -EIO && !mmc_blk_reset(md, card->host, type))
  1257. goto retry;
  1258. if (!err)
  1259. mmc_blk_reset_success(md, type);
  1260. blk_end_request(req, err, blk_rq_bytes(req));
  1261. return err ? 0 : 1;
  1262. }
  1263. static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
  1264. struct request *req)
  1265. {
  1266. struct mmc_blk_data *md = mq->data;
  1267. struct mmc_card *card = md->queue.card;
  1268. unsigned int from, nr, arg;
  1269. int err = 0, type = MMC_BLK_SECDISCARD;
  1270. if (!(mmc_can_secure_erase_trim(card))) {
  1271. err = -EOPNOTSUPP;
  1272. goto out;
  1273. }
  1274. from = blk_rq_pos(req);
  1275. nr = blk_rq_sectors(req);
  1276. if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
  1277. arg = MMC_SECURE_TRIM1_ARG;
  1278. else
  1279. arg = MMC_SECURE_ERASE_ARG;
  1280. retry:
  1281. if (card->quirks & MMC_QUIRK_INAND_CMD38) {
  1282. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  1283. INAND_CMD38_ARG_EXT_CSD,
  1284. arg == MMC_SECURE_TRIM1_ARG ?
  1285. INAND_CMD38_ARG_SECTRIM1 :
  1286. INAND_CMD38_ARG_SECERASE,
  1287. 0);
  1288. if (err)
  1289. goto out_retry;
  1290. }
  1291. err = mmc_erase(card, from, nr, arg);
  1292. if (err == -EIO)
  1293. goto out_retry;
  1294. if (err)
  1295. goto out;
  1296. if (arg == MMC_SECURE_TRIM1_ARG) {
  1297. if (card->quirks & MMC_QUIRK_INAND_CMD38) {
  1298. err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
  1299. INAND_CMD38_ARG_EXT_CSD,
  1300. INAND_CMD38_ARG_SECTRIM2,
  1301. 0);
  1302. if (err)
  1303. goto out_retry;
  1304. }
  1305. err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
  1306. if (err == -EIO)
  1307. goto out_retry;
  1308. if (err)
  1309. goto out;
  1310. }
  1311. out_retry:
  1312. if (err && !mmc_blk_reset(md, card->host, type))
  1313. goto retry;
  1314. if (!err)
  1315. mmc_blk_reset_success(md, type);
  1316. out:
  1317. blk_end_request(req, err, blk_rq_bytes(req));
  1318. return err ? 0 : 1;
  1319. }
  1320. static int mmc_blk_issue_sanitize_rq(struct mmc_queue *mq,
  1321. struct request *req)
  1322. {
  1323. struct mmc_blk_data *md = mq->data;
  1324. struct mmc_card *card = md->queue.card;
  1325. int err = 0;
  1326. BUG_ON(!card);
  1327. BUG_ON(!card->host);
  1328. if (!(mmc_can_sanitize(card) &&
  1329. (card->host->caps2 & MMC_CAP2_SANITIZE))) {
  1330. pr_warning("%s: %s - SANITIZE is not supported\n",
  1331. mmc_hostname(card->host), __func__);
  1332. err = -EOPNOTSUPP;
  1333. goto out;
  1334. }
  1335. pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
  1336. mmc_hostname(card->host), __func__);
  1337. trace_mmc_blk_erase_start(EXT_CSD_SANITIZE_START, 0, 0);
  1338. err = mmc_switch_ignore_timeout(card, EXT_CSD_CMD_SET_NORMAL,
  1339. EXT_CSD_SANITIZE_START, 1,
  1340. MMC_SANITIZE_REQ_TIMEOUT);
  1341. trace_mmc_blk_erase_end(EXT_CSD_SANITIZE_START, 0, 0);
  1342. if (err)
  1343. pr_err("%s: %s - mmc_switch() with "
  1344. "EXT_CSD_SANITIZE_START failed. err=%d\n",
  1345. mmc_hostname(card->host), __func__, err);
  1346. pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
  1347. __func__);
  1348. out:
  1349. blk_end_request(req, err, blk_rq_bytes(req));
  1350. return err ? 0 : 1;
  1351. }
  1352. static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
  1353. {
  1354. struct mmc_blk_data *md = mq->data;
  1355. struct request_queue *q = mq->queue;
  1356. struct mmc_card *card = md->queue.card;
  1357. int ret = 0;
  1358. ret = mmc_flush_cache(card);
  1359. if (ret == -ETIMEDOUT) {
1360. pr_info("%s: requeue flush request after timeout\n", __func__);
  1361. spin_lock_irq(q->queue_lock);
  1362. blk_requeue_request(q, req);
  1363. spin_unlock_irq(q->queue_lock);
  1364. ret = 0;
  1365. goto exit;
  1366. } else if (ret) {
1367. pr_err("%s: notify flush error to upper layers\n", __func__);
  1368. ret = -EIO;
  1369. }
  1370. blk_end_request_all(req, ret);
  1371. exit:
  1372. return ret ? 0 : 1;
  1373. }
  1374. /*
  1375. * Reformat current write as a reliable write, supporting
  1376. * both legacy and the enhanced reliable write MMC cards.
  1377. * In each transfer we'll handle only as much as a single
  1378. * reliable write can handle, thus finish the request in
  1379. * partial completions.
  1380. */
  1381. static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
  1382. struct mmc_card *card,
  1383. struct request *req)
  1384. {
  1385. if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
  1386. /* Legacy mode imposes restrictions on transfers. */
  1387. if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
  1388. brq->data.blocks = 1;
  1389. if (brq->data.blocks > card->ext_csd.rel_sectors)
  1390. brq->data.blocks = card->ext_csd.rel_sectors;
  1391. else if (brq->data.blocks < card->ext_csd.rel_sectors)
  1392. brq->data.blocks = 1;
  1393. }
  1394. }
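/*
 * Worked example (compiled out) of the legacy reliable-write clamping above,
 * assuming a non-zero rel_sectors of 8: an unaligned start or a short transfer
 * collapses to a single block, while larger aligned transfers are capped at
 * rel_sectors. Not part of the driver.
 */
#if 0
static unsigned int example_legacy_rel_wr_blocks(unsigned int start,
						 unsigned int blocks,
						 unsigned int rel_sectors)
{
	if (start % rel_sectors)	/* e.g. start 5,   rel 8 -> 1 block */
		return 1;
	if (blocks > rel_sectors)	/* e.g. 32 blocks, rel 8 -> 8       */
		return rel_sectors;
	if (blocks < rel_sectors)	/* e.g. 3 blocks,  rel 8 -> 1       */
		return 1;
	return blocks;			/* exactly rel_sectors is kept      */
}
#endif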
  1395. #define CMD_ERRORS \
  1396. (R1_OUT_OF_RANGE | /* Command argument out of range */ \
  1397. R1_ADDRESS_ERROR | /* Misaligned address */ \
  1398. R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
  1399. R1_WP_VIOLATION | /* Tried to write to protected block */ \
  1400. R1_CC_ERROR | /* Card controller error */ \
  1401. R1_ERROR) /* General/unknown error */
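/*
 * Minimal sketch (compiled out) of how a mask such as CMD_ERRORS is applied to
 * the R1 response word: any set bit means the r/w command itself failed and the
 * request is aborted. The bit positions below are assumptions written out for
 * illustration, not the authoritative R1_* definitions.
 */
#if 0
static int example_resp_has_cmd_error(unsigned int resp0)
{
	const unsigned int example_mask = (1u << 31) |	/* out of range    */
					  (1u << 30) |	/* address error   */
					  (1u << 29) |	/* block len error */
					  (1u << 26) |	/* WP violation    */
					  (1u << 20) |	/* CC error        */
					  (1u << 19);	/* general error   */

	return (resp0 & example_mask) != 0;
}
#endif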
  1402. static int mmc_blk_err_check(struct mmc_card *card,
  1403. struct mmc_async_req *areq)
  1404. {
  1405. struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
  1406. mmc_active);
  1407. struct mmc_blk_request *brq = &mq_mrq->brq;
  1408. struct request *req = mq_mrq->req;
  1409. int ecc_err = 0, gen_err = 0;
  1410. /*
  1411. * sbc.error indicates a problem with the set block count
  1412. * command. No data will have been transferred.
  1413. *
  1414. * cmd.error indicates a problem with the r/w command. No
  1415. * data will have been transferred.
  1416. *
  1417. * stop.error indicates a problem with the stop command. Data
  1418. * may have been transferred, or may still be transferring.
  1419. */
  1420. if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
  1421. brq->data.error) {
  1422. switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
  1423. case ERR_RETRY:
  1424. return MMC_BLK_RETRY;
  1425. case ERR_ABORT:
  1426. return MMC_BLK_ABORT;
  1427. case ERR_NOMEDIUM:
  1428. return MMC_BLK_NOMEDIUM;
  1429. case ERR_CONTINUE:
  1430. break;
  1431. }
  1432. }
  1433. /*
  1434. * Check for errors relating to the execution of the
  1435. * initial command - such as address errors. No data
  1436. * has been transferred.
  1437. */
  1438. if (brq->cmd.resp[0] & CMD_ERRORS) {
  1439. pr_err("%s: r/w command failed, status = %#x\n",
  1440. req->rq_disk->disk_name, brq->cmd.resp[0]);
  1441. return MMC_BLK_ABORT;
  1442. }
  1443. /*
  1444. * Everything else is either success, or a data error of some
  1445. * kind. If it was a write, we may have transitioned to
1446. * program mode, which we have to wait for to complete.
  1447. */
  1448. if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
  1449. u32 status;
  1450. unsigned long timeout;
  1451. timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
  1452. /* Check stop command response */
  1453. if (brq->stop.resp[0] & R1_ERROR) {
  1454. pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
  1455. req->rq_disk->disk_name, __func__,
  1456. brq->stop.resp[0]);
  1457. gen_err = 1;
  1458. }
  1459. do {
  1460. int err = get_card_status(card, &status, 5);
  1461. if (err) {
  1462. pr_err("%s: error %d requesting status\n",
  1463. req->rq_disk->disk_name, err);
  1464. return MMC_BLK_CMD_ERR;
  1465. }
  1466. /* Timeout if the device never becomes ready for data
  1467. * and never leaves the program state.
  1468. */
  1469. if (time_after(jiffies, timeout)) {
  1470. pr_err("%s: Card stuck in programming state!"\
  1471. " %s %s\n", mmc_hostname(card->host),
  1472. req->rq_disk->disk_name, __func__);
  1473. return MMC_BLK_CMD_ERR;
  1474. }
  1475. if (status & R1_ERROR) {
  1476. pr_err("%s: %s: general error sending status command, card status %#x\n",
  1477. req->rq_disk->disk_name, __func__,
  1478. status);
  1479. gen_err = 1;
  1480. }
  1481. /*
  1482. * Some cards mishandle the status bits,
  1483. * so make sure to check both the busy
  1484. * indication and the card state.
  1485. */
  1486. } while (!(status & R1_READY_FOR_DATA) ||
  1487. (R1_CURRENT_STATE(status) == R1_STATE_PRG));
  1488. }
  1489. /* if general error occurs, retry the write operation. */
  1490. if (gen_err) {
  1491. pr_warning("%s: retrying write for general error\n",
  1492. req->rq_disk->disk_name);
  1493. return MMC_BLK_RETRY;
  1494. }
  1495. if (brq->data.error) {
  1496. pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
  1497. req->rq_disk->disk_name, brq->data.error,
  1498. (unsigned)blk_rq_pos(req),
  1499. (unsigned)blk_rq_sectors(req),
  1500. brq->cmd.resp[0], brq->stop.resp[0]);
  1501. if (rq_data_dir(req) == READ) {
  1502. if (ecc_err)
  1503. return MMC_BLK_ECC_ERR;
  1504. return MMC_BLK_DATA_ERR;
  1505. } else {
  1506. return MMC_BLK_CMD_ERR;
  1507. }
  1508. }
  1509. if (!brq->data.bytes_xfered)
  1510. return MMC_BLK_RETRY;
  1511. if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
  1512. if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
  1513. return MMC_BLK_PARTIAL;
  1514. else
  1515. return MMC_BLK_SUCCESS;
  1516. }
  1517. if (blk_rq_bytes(req) != brq->data.bytes_xfered)
  1518. return MMC_BLK_PARTIAL;
  1519. return MMC_BLK_SUCCESS;
  1520. }
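/*
 * Compiled-out sketch of the busy/programming-state test used by the polling
 * loop in mmc_blk_err_check() above. The layout assumed here (READY_FOR_DATA
 * at bit 8, CURRENT_STATE in bits 9..12, programming state == 7) follows the
 * usual R1 card-status format and is stated as an assumption, not taken from
 * this file.
 */
#if 0
static int example_card_still_busy(unsigned int status)
{
	const unsigned int ready_for_data = 1u << 8;
	const unsigned int state_prg = 7;
	unsigned int current_state = (status >> 9) & 0xF;

	/* Keep polling while the card is busy or still programming. */
	return !(status & ready_for_data) || current_state == state_prg;
}
#endif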
  1521. /*
  1522. * mmc_blk_reinsert_req() - re-insert request back to the scheduler
  1523. * @areq: request to re-insert.
  1524. *
1525. * Request may be packed or single. When reinsertion fails, the request will be
1526. * requeued to the dispatch queue.
  1527. */
  1528. static void mmc_blk_reinsert_req(struct mmc_async_req *areq)
  1529. {
  1530. struct request *prq;
  1531. int ret = 0;
  1532. struct mmc_queue_req *mq_rq;
  1533. struct request_queue *q;
  1534. mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
  1535. q = mq_rq->req->q;
  1536. if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
  1537. while (!list_empty(&mq_rq->packed_list)) {
  1538. /* return requests in reverse order */
  1539. prq = list_entry_rq(mq_rq->packed_list.prev);
  1540. list_del_init(&prq->queuelist);
  1541. spin_lock_irq(q->queue_lock);
  1542. ret = blk_reinsert_request(q, prq);
  1543. if (ret) {
  1544. blk_requeue_request(q, prq);
  1545. spin_unlock_irq(q->queue_lock);
  1546. goto reinsert_error;
  1547. }
  1548. spin_unlock_irq(q->queue_lock);
  1549. }
  1550. } else {
  1551. spin_lock_irq(q->queue_lock);
  1552. ret = blk_reinsert_request(q, mq_rq->req);
  1553. if (ret)
  1554. blk_requeue_request(q, mq_rq->req);
  1555. spin_unlock_irq(q->queue_lock);
  1556. }
  1557. return;
  1558. reinsert_error:
1559. pr_err("%s: blk_reinsert_request() failed (%d)\n",
  1560. mq_rq->req->rq_disk->disk_name, ret);
  1561. /*
1562. * -EIO will be reported for this request and the rest of packed_list.
1563. * The urgent request will be processed anyway, while it is the upper
1564. * layer's responsibility to re-send the failed requests.
  1565. */
  1566. while (!list_empty(&mq_rq->packed_list)) {
  1567. prq = list_entry_rq(mq_rq->packed_list.next);
  1568. list_del_init(&prq->queuelist);
  1569. spin_lock_irq(q->queue_lock);
  1570. blk_requeue_request(q, prq);
  1571. spin_unlock_irq(q->queue_lock);
  1572. }
  1573. }
  1574. /*
  1575. * mmc_blk_update_interrupted_req() - update of the stopped request
  1576. * @card: the MMC card associated with the request.
  1577. * @areq: interrupted async request.
  1578. *
1579. * Get the stopped request state from the card and update the successfully
1580. * completed part of the request by setting packed_fail_idx. packed_fail_idx is
1581. * the index of the first uncompleted request in the packed request list; for a
1582. * non-packed request packed_fail_idx remains unchanged.
  1583. *
  1584. * Returns: MMC_BLK_SUCCESS for success, MMC_BLK_ABORT otherwise
  1585. */
  1586. static int mmc_blk_update_interrupted_req(struct mmc_card *card,
  1587. struct mmc_async_req *areq)
  1588. {
  1589. int ret = MMC_BLK_SUCCESS;
  1590. u8 *ext_csd;
  1591. int correctly_done;
  1592. struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
  1593. mmc_active);
  1594. struct request *prq;
  1595. u8 req_index = 0;
  1596. if (mq_rq->packed_cmd == MMC_PACKED_NONE)
  1597. return MMC_BLK_SUCCESS;
  1598. ext_csd = kmalloc(512, GFP_KERNEL);
  1599. if (!ext_csd)
  1600. return MMC_BLK_ABORT;
1601. /* get the number of correctly programmed sectors from the card */
  1602. ret = mmc_send_ext_csd(card, ext_csd);
  1603. if (ret) {
  1604. pr_err("%s: error %d reading ext_csd\n",
  1605. mmc_hostname(card->host), ret);
  1606. ret = MMC_BLK_ABORT;
  1607. goto exit;
  1608. }
  1609. correctly_done = card->ext_csd.data_sector_size *
  1610. (ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 0] << 0 |
  1611. ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 1] << 8 |
  1612. ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 2] << 16 |
  1613. ext_csd[EXT_CSD_CORRECTLY_PRG_SECTORS_NUM + 3] << 24);
  1614. /*
1615. * skip the packed command header (1 sector), which is included in the
1616. * counter but not actually written to the NAND
  1617. */
  1618. if (correctly_done >= card->ext_csd.data_sector_size)
  1619. correctly_done -= card->ext_csd.data_sector_size;
  1620. list_for_each_entry(prq, &mq_rq->packed_list, queuelist) {
  1621. if ((correctly_done - (int)blk_rq_bytes(prq)) < 0) {
1622. /* prq was not successful */
  1623. mq_rq->packed_fail_idx = req_index;
  1624. break;
  1625. }
  1626. correctly_done -= blk_rq_bytes(prq);
  1627. req_index++;
  1628. }
  1629. exit:
  1630. kfree(ext_csd);
  1631. return ret;
  1632. }
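/*
 * Compiled-out sketch of the bookkeeping done above: assemble the little-endian
 * "correctly programmed sectors" counter from four EXT_CSD bytes, subtract the
 * one-sector packed header, then walk the per-request byte counts until the
 * counter runs out. The inputs are made up for illustration.
 */
#if 0
static int example_find_packed_fail_idx(const unsigned char counter[4],
					unsigned int sector_size,
					const unsigned int *req_bytes,
					int num_reqs)
{
	int i;
	int done = sector_size * (counter[0] | counter[1] << 8 |
				  counter[2] << 16 | counter[3] << 24);

	/* The packed header occupies one sector but carries no payload. */
	if (done >= (int)sector_size)
		done -= sector_size;

	for (i = 0; i < num_reqs; i++) {
		if (done - (int)req_bytes[i] < 0)
			return i;	/* first request that did not complete */
		done -= req_bytes[i];
	}
	return -1;			/* everything was programmed */
}
#endif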
  1633. static int mmc_blk_packed_err_check(struct mmc_card *card,
  1634. struct mmc_async_req *areq)
  1635. {
  1636. struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
  1637. mmc_active);
  1638. struct request *req = mq_rq->req;
  1639. int err, check, status;
  1640. u8 ext_csd[512];
  1641. mq_rq->packed_retries--;
  1642. check = mmc_blk_err_check(card, areq);
  1643. err = get_card_status(card, &status, 0);
  1644. if (err) {
  1645. pr_err("%s: error %d sending status command\n",
  1646. req->rq_disk->disk_name, err);
  1647. return MMC_BLK_ABORT;
  1648. }
  1649. if (status & R1_EXCEPTION_EVENT) {
  1650. err = mmc_send_ext_csd(card, ext_csd);
  1651. if (err) {
  1652. pr_err("%s: error %d sending ext_csd\n",
  1653. req->rq_disk->disk_name, err);
  1654. return MMC_BLK_ABORT;
  1655. }
  1656. if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
  1657. EXT_CSD_PACKED_FAILURE) &&
  1658. (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
  1659. EXT_CSD_PACKED_GENERIC_ERROR)) {
  1660. if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
  1661. EXT_CSD_PACKED_INDEXED_ERROR) {
  1662. mq_rq->packed_fail_idx =
  1663. ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
  1664. return MMC_BLK_PARTIAL;
  1665. }
  1666. }
  1667. }
  1668. return check;
  1669. }
  1670. static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
  1671. struct mmc_card *card,
  1672. int disable_multi,
  1673. struct mmc_queue *mq)
  1674. {
  1675. u32 readcmd, writecmd;
  1676. struct mmc_blk_request *brq = &mqrq->brq;
  1677. struct request *req = mqrq->req;
  1678. struct mmc_blk_data *md = mq->data;
  1679. bool do_data_tag;
  1680. unsigned long flags;
  1681. /*
  1682. * Reliable writes are used to implement Forced Unit Access and
  1683. * REQ_META accesses, and are supported only on MMCs.
  1684. *
  1685. * XXX: this really needs a good explanation of why REQ_META
1686. * is treated specially.
  1687. */
  1688. bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
  1689. (req->cmd_flags & REQ_META)) &&
  1690. (rq_data_dir(req) == WRITE) &&
  1691. (md->flags & MMC_BLK_REL_WR);
  1692. spin_lock_irqsave(&card->host->mrq_lock, flags);
  1693. memset(brq, 0, sizeof(struct mmc_blk_request));
  1694. brq->mrq.cmd = &brq->cmd;
  1695. brq->mrq.data = &brq->data;
  1696. brq->cmd.arg = blk_rq_pos(req);
  1697. if (!mmc_card_blockaddr(card))
  1698. brq->cmd.arg <<= 9;
  1699. brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
  1700. brq->data.blksz = 512;
  1701. brq->stop.opcode = MMC_STOP_TRANSMISSION;
  1702. brq->stop.arg = 0;
  1703. brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
  1704. brq->data.blocks = blk_rq_sectors(req);
  1705. brq->data.fault_injected = false;
  1706. /*
  1707. * The block layer doesn't support all sector count
  1708. * restrictions, so we need to be prepared for too big
  1709. * requests.
  1710. */
  1711. if (brq->data.blocks > card->host->max_blk_count)
  1712. brq->data.blocks = card->host->max_blk_count;
  1713. if (brq->data.blocks > 1) {
  1714. /*
  1715. * After a read error, we redo the request one sector
  1716. * at a time in order to accurately determine which
  1717. * sectors can be read successfully.
  1718. */
  1719. if (disable_multi)
  1720. brq->data.blocks = 1;
  1721. /* Some controllers can't do multiblock reads due to hw bugs */
  1722. if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
  1723. rq_data_dir(req) == READ)
  1724. brq->data.blocks = 1;
  1725. }
  1726. if (brq->data.blocks > 1 || do_rel_wr) {
  1727. /* SPI multiblock writes terminate using a special
  1728. * token, not a STOP_TRANSMISSION request.
  1729. */
  1730. if (!mmc_host_is_spi(card->host) ||
  1731. rq_data_dir(req) == READ)
  1732. brq->mrq.stop = &brq->stop;
  1733. readcmd = MMC_READ_MULTIPLE_BLOCK;
  1734. writecmd = MMC_WRITE_MULTIPLE_BLOCK;
  1735. } else {
  1736. brq->mrq.stop = NULL;
  1737. readcmd = MMC_READ_SINGLE_BLOCK;
  1738. writecmd = MMC_WRITE_BLOCK;
  1739. }
  1740. if (rq_data_dir(req) == READ) {
  1741. brq->cmd.opcode = readcmd;
  1742. brq->data.flags |= MMC_DATA_READ;
  1743. } else {
  1744. brq->cmd.opcode = writecmd;
  1745. brq->data.flags |= MMC_DATA_WRITE;
  1746. }
  1747. if (do_rel_wr)
  1748. mmc_apply_rel_rw(brq, card, req);
  1749. /*
1750. * The data tag is used only when writing metadata, to speed
1751. * up the write and any subsequent read of this metadata.
  1752. */
  1753. do_data_tag = (card->ext_csd.data_tag_unit_size) &&
  1754. (req->cmd_flags & REQ_META) &&
  1755. (rq_data_dir(req) == WRITE) &&
  1756. ((brq->data.blocks * brq->data.blksz) >=
  1757. card->ext_csd.data_tag_unit_size);
  1758. /*
  1759. * Pre-defined multi-block transfers are preferable to
  1760. * open ended-ones (and necessary for reliable writes).
  1761. * However, it is not sufficient to just send CMD23,
  1762. * and avoid the final CMD12, as on an error condition
  1763. * CMD12 (stop) needs to be sent anyway. This, coupled
  1764. * with Auto-CMD23 enhancements provided by some
  1765. * hosts, means that the complexity of dealing
  1766. * with this is best left to the host. If CMD23 is
  1767. * supported by card and host, we'll fill sbc in and let
  1768. * the host deal with handling it correctly. This means
  1769. * that for hosts that don't expose MMC_CAP_CMD23, no
  1770. * change of behavior will be observed.
  1771. *
  1772. * N.B: Some MMC cards experience perf degradation.
  1773. * We'll avoid using CMD23-bounded multiblock writes for
  1774. * these, while retaining features like reliable writes.
  1775. */
  1776. if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
  1777. (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
  1778. do_data_tag)) {
  1779. brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
  1780. brq->sbc.arg = brq->data.blocks |
  1781. (do_rel_wr ? (1 << 31) : 0) |
  1782. (do_data_tag ? (1 << 29) : 0);
  1783. brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
  1784. brq->mrq.sbc = &brq->sbc;
  1785. }
  1786. mmc_set_data_timeout(&brq->data, card);
  1787. brq->data.sg = mqrq->sg;
  1788. brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
  1789. /*
  1790. * Adjust the sg list so it is the same size as the
  1791. * request.
  1792. */
  1793. if (brq->data.blocks != blk_rq_sectors(req)) {
  1794. int i, data_size = brq->data.blocks << 9;
  1795. struct scatterlist *sg;
  1796. for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
  1797. data_size -= sg->length;
  1798. if (data_size <= 0) {
  1799. sg->length += data_size;
  1800. i++;
  1801. break;
  1802. }
  1803. }
  1804. brq->data.sg_len = i;
  1805. }
  1806. mqrq->mmc_active.mrq = &brq->mrq;
  1807. mqrq->mmc_active.cmd_flags = req->cmd_flags;
  1808. spin_unlock_irqrestore(&card->host->mrq_lock, flags);
  1809. if (mq->err_check_fn)
  1810. mqrq->mmc_active.err_check = mq->err_check_fn;
  1811. else
  1812. mqrq->mmc_active.err_check = mmc_blk_err_check;
  1813. mqrq->mmc_active.reinsert_req = mmc_blk_reinsert_req;
  1814. mqrq->mmc_active.update_interrupted_req =
  1815. mmc_blk_update_interrupted_req;
  1816. mmc_queue_bounce_pre(mqrq);
  1817. }
  1818. /**
  1819. * mmc_blk_disable_wr_packing() - disables packing mode
  1820. * @mq: MMC queue.
  1821. *
  1822. */
  1823. void mmc_blk_disable_wr_packing(struct mmc_queue *mq)
  1824. {
  1825. if (mq) {
  1826. mq->wr_packing_enabled = false;
  1827. mq->num_of_potential_packed_wr_reqs = 0;
  1828. }
  1829. }
  1830. EXPORT_SYMBOL(mmc_blk_disable_wr_packing);
  1831. static int get_packed_trigger(int potential, struct mmc_card *card,
  1832. struct request *req, int curr_trigger)
  1833. {
  1834. static int num_mean_elements = 1;
  1835. static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
  1836. unsigned int trigger = curr_trigger;
  1837. unsigned int pckd_trgr_upper_bound = card->ext_csd.max_packed_writes;
  1838. /* scale down the upper bound to 75% */
  1839. pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4;
  1840. /*
  1841. * since the most common calls for this function are with small
  1842. * potential write values and since we don't want these calls to affect
  1843. * the packed trigger, set a lower bound and ignore calls with
  1844. * potential lower than that bound
  1845. */
  1846. if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND)
  1847. return trigger;
  1848. /*
  1849. * this is to prevent integer overflow in the following calculation:
  1850. * once every PACKED_TRIGGER_MAX_ELEMENTS reset the algorithm
  1851. */
  1852. if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) {
  1853. num_mean_elements = 1;
  1854. mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
  1855. }
  1856. /*
  1857. * get next mean value based on previous mean value and current
  1858. * potential packed writes. Calculation is as follows:
  1859. * mean_pot[i+1] =
  1860. * ((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1)
  1861. */
  1862. mean_potential *= num_mean_elements;
  1863. /*
  1864. * add num_mean_elements so that the division of two integers doesn't
  1865. * lower mean_potential too much
  1866. */
  1867. if (potential > mean_potential)
  1868. mean_potential += num_mean_elements;
  1869. mean_potential += potential;
  1870. /* this is for gaining more precision when dividing two integers */
  1871. mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER;
  1872. /* this completes the mean calculation */
  1873. mean_potential /= ++num_mean_elements;
  1874. mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER;
  1875. /*
  1876. * if current potential packed writes is greater than the mean potential
  1877. * then the heuristic is that the following workload will contain many
  1878. * write requests, therefore we lower the packed trigger. In the
  1879. * opposite case we want to increase the trigger in order to get less
  1880. * packing events.
  1881. */
  1882. if (potential >= mean_potential)
  1883. trigger = (trigger <= PCKD_TRGR_LOWER_BOUND) ?
  1884. PCKD_TRGR_LOWER_BOUND : trigger - 1;
  1885. else
  1886. trigger = (trigger >= pckd_trgr_upper_bound) ?
  1887. pckd_trgr_upper_bound : trigger + 1;
  1888. /*
  1889. * an urgent read request indicates a packed list being interrupted
  1890. * by this read, therefore we aim for less packing, hence the trigger
  1891. * gets increased
  1892. */
  1893. if (req && (req->cmd_flags & REQ_URGENT) && (rq_data_dir(req) == READ))
  1894. trigger += PCKD_TRGR_URGENT_PENALTY;
  1895. return trigger;
  1896. }
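/*
 * Compiled-out numeric sketch of the running-mean update performed above:
 * mean[i+1] = (mean[i] * n + potential) / (n + 1), with a fixed-point
 * multiplier so the integer division does not erode the mean. The values in
 * the trailing comment are arbitrary.
 */
#if 0
static unsigned long example_update_mean(unsigned long mean, int *n,
					 unsigned long potential,
					 unsigned long precision)
{
	mean *= *n;
	if (potential > mean)
		mean += *n;	/* small bias so truncation can't drag the mean down */
	mean += potential;
	mean *= precision;	/* fixed-point scale up ... */
	mean /= ++(*n);
	mean /= precision;	/* ... and back down */
	return mean;		/* e.g. mean 16, n 3, potential 40 -> (48 + 40) / 4 = 22 */
}
#endif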
  1897. static void mmc_blk_write_packing_control(struct mmc_queue *mq,
  1898. struct request *req)
  1899. {
  1900. struct mmc_host *host = mq->card->host;
  1901. int data_dir;
  1902. if (!(host->caps2 & MMC_CAP2_PACKED_WR))
  1903. return;
  1904. /* Support for the write packing on eMMC 4.5 or later */
  1905. if (mq->card->ext_csd.rev <= 5)
  1906. return;
  1907. /*
  1908. * In case the packing control is not supported by the host, it should
  1909. * not have an effect on the write packing. Therefore we have to enable
  1910. * the write packing
  1911. */
  1912. if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
  1913. mq->wr_packing_enabled = true;
  1914. return;
  1915. }
1916. if (!req || (req->cmd_flags & REQ_FLUSH)) {
  1917. if (mq->num_of_potential_packed_wr_reqs >
  1918. mq->num_wr_reqs_to_start_packing)
  1919. mq->wr_packing_enabled = true;
  1920. mq->num_wr_reqs_to_start_packing =
  1921. get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
  1922. mq->card, req,
  1923. mq->num_wr_reqs_to_start_packing);
  1924. mq->num_of_potential_packed_wr_reqs = 0;
  1925. return;
  1926. }
  1927. data_dir = rq_data_dir(req);
  1928. if (data_dir == READ) {
  1929. mmc_blk_disable_wr_packing(mq);
  1930. mq->num_wr_reqs_to_start_packing =
  1931. get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
  1932. mq->card, req,
  1933. mq->num_wr_reqs_to_start_packing);
  1934. mq->num_of_potential_packed_wr_reqs = 0;
  1935. mq->wr_packing_enabled = false;
  1936. return;
  1937. } else if (data_dir == WRITE) {
  1938. mq->num_of_potential_packed_wr_reqs++;
  1939. }
  1940. if (mq->num_of_potential_packed_wr_reqs >
  1941. mq->num_wr_reqs_to_start_packing)
  1942. mq->wr_packing_enabled = true;
  1943. }
  1944. struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
  1945. {
  1946. if (!card)
  1947. return NULL;
  1948. return &card->wr_pack_stats;
  1949. }
  1950. EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
  1951. void mmc_blk_init_packed_statistics(struct mmc_card *card)
  1952. {
  1953. int max_num_of_packed_reqs = 0;
  1954. if (!card || !card->wr_pack_stats.packing_events)
  1955. return;
  1956. max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
  1957. spin_lock(&card->wr_pack_stats.lock);
  1958. memset(card->wr_pack_stats.packing_events, 0,
  1959. (max_num_of_packed_reqs + 1) *
  1960. sizeof(*card->wr_pack_stats.packing_events));
  1961. memset(&card->wr_pack_stats.pack_stop_reason, 0,
  1962. sizeof(card->wr_pack_stats.pack_stop_reason));
  1963. card->wr_pack_stats.enabled = true;
  1964. spin_unlock(&card->wr_pack_stats.lock);
  1965. }
  1966. EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
  1967. static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
  1968. {
  1969. struct request_queue *q = mq->queue;
  1970. struct mmc_card *card = mq->card;
  1971. struct request *cur = req, *next = NULL;
  1972. struct mmc_blk_data *md = mq->data;
  1973. bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
  1974. unsigned int req_sectors = 0, phys_segments = 0;
  1975. unsigned int max_blk_count, max_phys_segs;
  1976. u8 put_back = 0;
  1977. u8 max_packed_rw = 0;
  1978. u8 reqs = 0;
  1979. struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
  1980. mmc_blk_clear_packed(mq->mqrq_cur);
  1981. if (!(md->flags & MMC_BLK_CMD23) ||
  1982. !card->ext_csd.packed_event_en)
  1983. goto no_packed;
  1984. if (!mq->wr_packing_enabled)
  1985. goto no_packed;
  1986. if ((rq_data_dir(cur) == WRITE) &&
  1987. (card->host->caps2 & MMC_CAP2_PACKED_WR))
  1988. max_packed_rw = card->ext_csd.max_packed_writes;
  1989. if (max_packed_rw == 0)
  1990. goto no_packed;
  1991. if (mmc_req_rel_wr(cur) &&
  1992. (md->flags & MMC_BLK_REL_WR) &&
  1993. !en_rel_wr)
  1994. goto no_packed;
  1995. if (mmc_large_sec(card) &&
  1996. !IS_ALIGNED(blk_rq_sectors(cur), 8))
  1997. goto no_packed;
  1998. if (cur->cmd_flags & REQ_FUA)
  1999. goto no_packed;
  2000. max_blk_count = min(card->host->max_blk_count,
  2001. card->host->max_req_size >> 9);
  2002. if (unlikely(max_blk_count > 0xffff))
  2003. max_blk_count = 0xffff;
  2004. max_phys_segs = queue_max_segments(q);
  2005. req_sectors += blk_rq_sectors(cur);
  2006. phys_segments += cur->nr_phys_segments;
  2007. if (rq_data_dir(cur) == WRITE) {
  2008. req_sectors++;
  2009. phys_segments++;
  2010. }
  2011. spin_lock(&stats->lock);
  2012. while (reqs < max_packed_rw - 1) {
  2013. spin_lock_irq(q->queue_lock);
  2014. next = blk_fetch_request(q);
  2015. spin_unlock_irq(q->queue_lock);
  2016. if (!next) {
  2017. MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
  2018. break;
  2019. }
  2020. if (mmc_large_sec(card) &&
  2021. !IS_ALIGNED(blk_rq_sectors(next), 8)) {
  2022. MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN);
  2023. put_back = 1;
  2024. break;
  2025. }
  2026. if (next->cmd_flags & REQ_DISCARD ||
  2027. next->cmd_flags & REQ_FLUSH) {
  2028. MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
  2029. put_back = 1;
  2030. break;
  2031. }
  2032. if (next->cmd_flags & REQ_FUA) {
  2033. MMC_BLK_UPDATE_STOP_REASON(stats, FUA);
  2034. put_back = 1;
  2035. break;
  2036. }
  2037. if (rq_data_dir(cur) != rq_data_dir(next)) {
  2038. MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
  2039. put_back = 1;
  2040. break;
  2041. }
  2042. if (mmc_req_rel_wr(next) &&
  2043. (md->flags & MMC_BLK_REL_WR) &&
  2044. !en_rel_wr) {
  2045. MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
  2046. put_back = 1;
  2047. break;
  2048. }
  2049. req_sectors += blk_rq_sectors(next);
  2050. if (req_sectors > max_blk_count) {
  2051. if (stats->enabled)
  2052. stats->pack_stop_reason[EXCEEDS_SECTORS]++;
  2053. put_back = 1;
  2054. break;
  2055. }
  2056. phys_segments += next->nr_phys_segments;
  2057. if (phys_segments > max_phys_segs) {
  2058. MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
  2059. put_back = 1;
  2060. break;
  2061. }
  2062. if (mq->no_pack_for_random) {
  2063. if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) !=
  2064. blk_rq_pos(next)) {
  2065. MMC_BLK_UPDATE_STOP_REASON(stats, RANDOM);
  2066. put_back = 1;
  2067. break;
  2068. }
  2069. }
  2070. if (rq_data_dir(next) == WRITE) {
  2071. mq->num_of_potential_packed_wr_reqs++;
  2072. if (card->ext_csd.bkops_en)
  2073. card->bkops_info.sectors_changed +=
  2074. blk_rq_sectors(next);
  2075. }
  2076. list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
  2077. cur = next;
  2078. reqs++;
  2079. }
  2080. if (put_back) {
  2081. spin_lock_irq(q->queue_lock);
  2082. blk_requeue_request(q, next);
  2083. spin_unlock_irq(q->queue_lock);
  2084. }
  2085. if (stats->enabled) {
  2086. if (reqs + 1 <= card->ext_csd.max_packed_writes)
  2087. stats->packing_events[reqs + 1]++;
  2088. if (reqs + 1 == max_packed_rw)
  2089. MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
  2090. }
  2091. spin_unlock(&stats->lock);
  2092. if (reqs > 0) {
  2093. list_add(&req->queuelist, &mq->mqrq_cur->packed_list);
  2094. mq->mqrq_cur->packed_num = ++reqs;
  2095. mq->mqrq_cur->packed_retries = reqs;
  2096. return reqs;
  2097. }
  2098. no_packed:
  2099. mmc_blk_clear_packed(mq->mqrq_cur);
  2100. return 0;
  2101. }
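/*
 * Compiled-out sketch of the packing budget checks applied above: the running
 * sector count (including the extra header sector added for writes) must stay
 * within the host block-count limit, itself clamped to 0xffff as in the code
 * above, and the running segment count must stay within the queue limit. The
 * numbers passed in are arbitrary.
 */
#if 0
static int example_fits_packed_budget(unsigned int req_sectors,
				      unsigned int next_sectors,
				      unsigned int phys_segments,
				      unsigned int next_segments,
				      unsigned int host_max_blk,
				      unsigned int max_segs)
{
	unsigned int max_blk_count = host_max_blk;

	if (max_blk_count > 0xffff)	/* mirrors the clamp above */
		max_blk_count = 0xffff;

	if (req_sectors + next_sectors > max_blk_count)
		return 0;		/* would exceed the sector budget  */
	if (phys_segments + next_segments > max_segs)
		return 0;		/* would exceed the segment budget */
	return 1;
}
#endif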
  2102. static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
  2103. struct mmc_card *card,
  2104. struct mmc_queue *mq)
  2105. {
  2106. struct mmc_blk_request *brq = &mqrq->brq;
  2107. struct request *req = mqrq->req;
  2108. struct request *prq;
  2109. struct mmc_blk_data *md = mq->data;
  2110. bool do_rel_wr, do_data_tag;
  2111. u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
  2112. u8 i = 1;
  2113. mqrq->packed_cmd = MMC_PACKED_WRITE;
  2114. mqrq->packed_blocks = 0;
  2115. mqrq->packed_fail_idx = MMC_PACKED_N_IDX;
  2116. memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr));
  2117. packed_cmd_hdr[0] = (mqrq->packed_num << 16) |
  2118. (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
  2119. /*
  2120. * Argument for each entry of packed group
  2121. */
  2122. list_for_each_entry(prq, &mqrq->packed_list, queuelist) {
  2123. do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
  2124. do_data_tag = (card->ext_csd.data_tag_unit_size) &&
  2125. (prq->cmd_flags & REQ_META) &&
  2126. (rq_data_dir(prq) == WRITE) &&
  2127. blk_rq_bytes(prq) >= card->ext_csd.data_tag_unit_size;
  2128. /* Argument of CMD23 */
  2129. packed_cmd_hdr[(i * 2)] =
  2130. (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
  2131. (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
  2132. blk_rq_sectors(prq);
  2133. /* Argument of CMD18 or CMD25 */
  2134. packed_cmd_hdr[((i * 2)) + 1] =
  2135. mmc_card_blockaddr(card) ?
  2136. blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
  2137. mqrq->packed_blocks += blk_rq_sectors(prq);
  2138. i++;
  2139. }
  2140. memset(brq, 0, sizeof(struct mmc_blk_request));
  2141. brq->mrq.cmd = &brq->cmd;
  2142. brq->mrq.data = &brq->data;
  2143. brq->mrq.sbc = &brq->sbc;
  2144. brq->mrq.stop = &brq->stop;
  2145. brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
  2146. brq->sbc.arg = MMC_CMD23_ARG_PACKED | (mqrq->packed_blocks + 1);
  2147. brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
  2148. brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
  2149. brq->cmd.arg = blk_rq_pos(req);
  2150. if (!mmc_card_blockaddr(card))
  2151. brq->cmd.arg <<= 9;
  2152. brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
  2153. brq->data.blksz = 512;
  2154. brq->data.blocks = mqrq->packed_blocks + 1;
  2155. brq->data.flags |= MMC_DATA_WRITE;
  2156. brq->data.fault_injected = false;
  2157. brq->stop.opcode = MMC_STOP_TRANSMISSION;
  2158. brq->stop.arg = 0;
  2159. brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
  2160. mmc_set_data_timeout(&brq->data, card);
  2161. brq->data.sg = mqrq->sg;
  2162. brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
  2163. mqrq->mmc_active.mrq = &brq->mrq;
  2164. mqrq->mmc_active.cmd_flags = req->cmd_flags;
  2165. /*
2166. * This is intended for packed command test usage; when these functions
2167. * are not in use, the respective pointers are NULL.
  2168. */
  2169. if (mq->err_check_fn)
  2170. mqrq->mmc_active.err_check = mq->err_check_fn;
  2171. else
  2172. mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
  2173. if (mq->packed_test_fn)
  2174. mq->packed_test_fn(mq->queue, mqrq);
  2175. mqrq->mmc_active.reinsert_req = mmc_blk_reinsert_req;
  2176. mqrq->mmc_active.update_interrupted_req =
  2177. mmc_blk_update_interrupted_req;
  2178. mmc_queue_bounce_pre(mqrq);
  2179. }
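/*
 * Compiled-out sketch of the packed-command header laid out above: word 0
 * carries the entry count, direction and version; each packed request then
 * contributes a pair of words, the CMD23 argument followed by the CMD25 start
 * address. The flag values below are placeholders, not the driver's real
 * PACKED_CMD_* / MMC_CMD23_ARG_* constants.
 */
#if 0
static void example_build_packed_hdr(unsigned int *hdr, unsigned int entries,
				     const unsigned int *cmd23_args,
				     const unsigned int *start_addrs)
{
	unsigned int i;
	const unsigned int example_wr = 0x02;	/* placeholder "write" flag   */
	const unsigned int example_ver = 0x01;	/* placeholder header version */

	hdr[0] = (entries << 16) | (example_wr << 8) | example_ver;
	for (i = 0; i < entries; i++) {
		hdr[2 * (i + 1)] = cmd23_args[i];	/* CMD23 argument      */
		hdr[2 * (i + 1) + 1] = start_addrs[i];	/* CMD18/CMD25 address */
	}
}
#endif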
  2180. static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
  2181. struct mmc_blk_request *brq, struct request *req,
  2182. int ret)
  2183. {
  2184. struct mmc_queue_req *mq_rq;
  2185. mq_rq = container_of(brq, struct mmc_queue_req, brq);
  2186. /*
  2187. * If this is an SD card and we're writing, we can first
  2188. * mark the known good sectors as ok.
  2189. *
2190. * If the card is not SD, we can still acknowledge the sectors
2191. * reported as written by the controller (which might be less than
2192. * the real number of written sectors, but never more).
  2193. */
  2194. if (mmc_card_sd(card)) {
  2195. u32 blocks;
  2196. if (!brq->data.fault_injected) {
  2197. blocks = mmc_sd_num_wr_blocks(card);
  2198. if (blocks != (u32)-1)
  2199. ret = blk_end_request(req, 0, blocks << 9);
  2200. } else
  2201. ret = blk_end_request(req, 0, brq->data.bytes_xfered);
  2202. } else {
  2203. if (mq_rq->packed_cmd == MMC_PACKED_NONE)
  2204. ret = blk_end_request(req, 0, brq->data.bytes_xfered);
  2205. }
  2206. return ret;
  2207. }
  2208. static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
  2209. {
  2210. struct request *prq;
  2211. int idx = mq_rq->packed_fail_idx, i = 0;
  2212. int ret = 0;
  2213. while (!list_empty(&mq_rq->packed_list)) {
  2214. prq = list_entry_rq(mq_rq->packed_list.next);
  2215. if (idx == i) {
  2216. /* retry from error index */
  2217. mq_rq->packed_num -= idx;
  2218. mq_rq->req = prq;
  2219. ret = 1;
  2220. if (mq_rq->packed_num == MMC_PACKED_N_SINGLE) {
  2221. list_del_init(&prq->queuelist);
  2222. mmc_blk_clear_packed(mq_rq);
  2223. }
  2224. return ret;
  2225. }
  2226. list_del_init(&prq->queuelist);
  2227. blk_end_request(prq, 0, blk_rq_bytes(prq));
  2228. i++;
  2229. }
  2230. mmc_blk_clear_packed(mq_rq);
  2231. return ret;
  2232. }
  2233. static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq,
  2234. unsigned int cmd_flags)
  2235. {
  2236. struct request *prq;
  2237. while (!list_empty(&mq_rq->packed_list)) {
  2238. prq = list_entry_rq(mq_rq->packed_list.next);
  2239. list_del_init(&prq->queuelist);
  2240. prq->cmd_flags |= cmd_flags;
  2241. blk_end_request(prq, -EIO, blk_rq_bytes(prq));
  2242. }
  2243. mmc_blk_clear_packed(mq_rq);
  2244. }
  2245. static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
  2246. struct mmc_queue_req *mq_rq)
  2247. {
  2248. struct request *prq;
  2249. struct request_queue *q = mq->queue;
  2250. while (!list_empty(&mq_rq->packed_list)) {
  2251. prq = list_entry_rq(mq_rq->packed_list.prev);
  2252. if (prq->queuelist.prev != &mq_rq->packed_list) {
  2253. list_del_init(&prq->queuelist);
  2254. spin_lock_irq(q->queue_lock);
  2255. blk_requeue_request(mq->queue, prq);
  2256. spin_unlock_irq(q->queue_lock);
  2257. } else {
  2258. list_del_init(&prq->queuelist);
  2259. }
  2260. }
  2261. mmc_blk_clear_packed(mq_rq);
  2262. }
  2263. static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
  2264. {
  2265. struct mmc_blk_data *md = mq->data;
  2266. struct mmc_card *card = md->queue.card;
  2267. struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
  2268. int ret = 1, disable_multi = 0, retry = 0, type;
  2269. enum mmc_blk_status status;
  2270. struct mmc_queue_req *mq_rq;
  2271. struct request *req;
  2272. struct mmc_async_req *areq;
  2273. const u8 packed_num = 2;
  2274. u8 reqs = 0;
  2275. if (!rqc && !mq->mqrq_prev->req)
  2276. return 0;
  2277. if (rqc) {
  2278. if ((card->ext_csd.bkops_en) && (rq_data_dir(rqc) == WRITE))
  2279. card->bkops_info.sectors_changed += blk_rq_sectors(rqc);
  2280. reqs = mmc_blk_prep_packed_list(mq, rqc);
  2281. }
  2282. do {
  2283. if (rqc) {
  2284. if (reqs >= packed_num)
  2285. mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
  2286. card, mq);
  2287. else
  2288. mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
  2289. areq = &mq->mqrq_cur->mmc_active;
  2290. } else
  2291. areq = NULL;
  2292. areq = mmc_start_req(card->host, areq, (int *) &status);
  2293. if (!areq) {
  2294. if (status == MMC_BLK_NEW_REQUEST)
  2295. set_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
  2296. return 0;
  2297. }
  2298. mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
  2299. brq = &mq_rq->brq;
  2300. req = mq_rq->req;
  2301. type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
  2302. mmc_queue_bounce_post(mq_rq);
  2303. switch (status) {
  2304. case MMC_BLK_URGENT:
  2305. if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
  2306. /* complete successfully transmitted part */
  2307. if (mmc_blk_end_packed_req(mq_rq))
  2308. /* process for not transmitted part */
  2309. mmc_blk_reinsert_req(areq);
  2310. } else {
  2311. mmc_blk_reinsert_req(areq);
  2312. }
  2313. set_bit(MMC_QUEUE_URGENT_REQUEST, &mq->flags);
  2314. ret = 0;
  2315. break;
  2316. case MMC_BLK_URGENT_DONE:
  2317. case MMC_BLK_SUCCESS:
  2318. case MMC_BLK_PARTIAL:
  2319. /*
  2320. * A block was successfully transferred.
  2321. */
  2322. mmc_blk_reset_success(md, type);
  2323. if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
  2324. ret = mmc_blk_end_packed_req(mq_rq);
  2325. break;
  2326. } else {
  2327. ret = blk_end_request(req, 0,
  2328. brq->data.bytes_xfered);
  2329. }
  2330. /*
  2331. * If the blk_end_request function returns non-zero even
  2332. * though all data has been transferred and no errors
  2333. * were returned by the host controller, it's a bug.
  2334. */
  2335. if (status == MMC_BLK_SUCCESS && ret) {
  2336. pr_err("%s BUG rq_tot %d d_xfer %d\n",
  2337. __func__, blk_rq_bytes(req),
  2338. brq->data.bytes_xfered);
  2339. rqc = NULL;
  2340. goto cmd_abort;
  2341. }
  2342. break;
  2343. case MMC_BLK_CMD_ERR:
  2344. ret = mmc_blk_cmd_err(md, card, brq, req, ret);
  2345. if (mmc_blk_reset(md, card->host, type))
  2346. goto cmd_abort;
  2347. if (!ret)
  2348. goto start_new_req;
  2349. break;
  2350. case MMC_BLK_RETRY:
  2351. if (retry++ < MMC_BLK_MAX_RETRIES)
  2352. break;
  2353. /* Fall through */
  2354. case MMC_BLK_ABORT:
  2355. if (!mmc_blk_reset(md, card->host, type) &&
  2356. (retry++ < (MMC_BLK_MAX_RETRIES + 1)))
  2357. break;
  2358. goto cmd_abort;
  2359. case MMC_BLK_DATA_ERR: {
  2360. int err;
  2361. err = mmc_blk_reset(md, card->host, type);
  2362. if (!err)
  2363. break;
  2364. if (err == -ENODEV ||
  2365. mq_rq->packed_cmd != MMC_PACKED_NONE)
  2366. goto cmd_abort;
  2367. /* Fall through */
  2368. }
  2369. case MMC_BLK_ECC_ERR:
  2370. if (brq->data.blocks > 1) {
  2371. /* Redo read one sector at a time */
  2372. pr_warning("%s: retrying using single block read\n",
  2373. req->rq_disk->disk_name);
  2374. disable_multi = 1;
  2375. break;
  2376. }
  2377. /*
2378. * Special case: if an SD card returns a data error even for a
2379. * single-block read of sector 0, skip reading the other blocks.
  2380. */
  2381. if (mmc_card_sd(card) &&
  2382. (unsigned)blk_rq_pos(req) == 0 &&
  2383. brq->data.error)
  2384. goto cmd_abort;
  2385. /*
  2386. * After an error, we redo I/O one sector at a
  2387. * time, so we only reach here after trying to
  2388. * read a single sector.
  2389. */
  2390. ret = blk_end_request(req, -EIO,
  2391. brq->data.blksz);
  2392. if (!ret)
  2393. goto start_new_req;
  2394. break;
  2395. case MMC_BLK_NOMEDIUM:
  2396. goto cmd_abort;
  2397. default:
2398. pr_err("%s:%s: Unhandled return value (%d)\n",
  2399. req->rq_disk->disk_name,
  2400. __func__, status);
  2401. goto cmd_abort;
  2402. }
  2403. if (ret) {
  2404. if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
  2405. /*
2406. * In case of an incomplete request,
  2407. * prepare it again and resend.
  2408. */
  2409. mmc_blk_rw_rq_prep(mq_rq, card,
  2410. disable_multi, mq);
  2411. mmc_start_req(card->host,
  2412. &mq_rq->mmc_active, NULL);
  2413. } else {
  2414. if (!mq_rq->packed_retries)
  2415. goto cmd_abort;
  2416. mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
  2417. mmc_start_req(card->host,
  2418. &mq_rq->mmc_active, NULL);
  2419. }
  2420. }
  2421. } while (ret);
  2422. return 1;
  2423. cmd_abort:
  2424. if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
  2425. if (mmc_card_removed(card))
  2426. req->cmd_flags |= REQ_QUIET;
  2427. while (ret)
  2428. ret = blk_end_request(req, -EIO,
  2429. blk_rq_cur_bytes(req));
  2430. } else {
  2431. mmc_blk_abort_packed_req(mq_rq, 0);
  2432. }
  2433. start_new_req:
  2434. if (rqc) {
  2435. if (mmc_card_removed(card)) {
  2436. if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
  2437. rqc->cmd_flags |= REQ_QUIET;
  2438. blk_end_request_all(rqc, -EIO);
  2439. } else {
  2440. mmc_blk_abort_packed_req(mq_rq, REQ_QUIET);
  2441. }
  2442. } else {
2443. /* If the current request is packed, it needs to be put back */
  2444. if (mq_rq->packed_cmd != MMC_PACKED_NONE)
  2445. mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
  2446. mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
  2447. mmc_start_req(card->host,
  2448. &mq->mqrq_cur->mmc_active,
  2449. NULL);
  2450. }
  2451. }
  2452. return 0;
  2453. }
  2454. static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
  2455. {
  2456. int ret;
  2457. struct mmc_blk_data *md = mq->data;
  2458. struct mmc_card *card = md->queue.card;
  2459. struct mmc_host *host = card->host;
  2460. unsigned long flags;
  2461. unsigned int cmd_flags = req ? req->cmd_flags : 0;
  2462. if (req && !mq->mqrq_prev->req) {
  2463. mmc_rpm_hold(host, &card->dev);
  2464. #ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
  2465. if (mmc_bus_needs_resume(card->host)) {
  2466. mmc_resume_bus(card->host);
  2467. }
  2468. #endif
  2469. /* claim host only for the first request */
  2470. mmc_claim_host(card->host);
  2471. if (card->ext_csd.bkops_en)
  2472. mmc_stop_bkops(card);
  2473. }
  2474. ret = mmc_blk_part_switch(card, md);
  2475. if (ret) {
  2476. if (req) {
  2477. blk_end_request_all(req, -EIO);
  2478. }
  2479. ret = 0;
  2480. goto out;
  2481. }
  2482. mmc_blk_write_packing_control(mq, req);
  2483. clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
  2484. clear_bit(MMC_QUEUE_URGENT_REQUEST, &mq->flags);
  2485. if (cmd_flags & REQ_SANITIZE) {
  2486. /* complete ongoing async transfer before issuing sanitize */
  2487. if (card->host && card->host->areq)
  2488. mmc_blk_issue_rw_rq(mq, NULL);
  2489. ret = mmc_blk_issue_sanitize_rq(mq, req);
  2490. } else if (cmd_flags & REQ_DISCARD) {
  2491. /* complete ongoing async transfer before issuing discard */
  2492. if (card->host->areq)
  2493. mmc_blk_issue_rw_rq(mq, NULL);
  2494. if (cmd_flags & REQ_SECURE &&
  2495. !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
  2496. ret = mmc_blk_issue_secdiscard_rq(mq, req);
  2497. else
  2498. ret = mmc_blk_issue_discard_rq(mq, req);
  2499. } else if (cmd_flags & REQ_FLUSH) {
  2500. /* complete ongoing async transfer before issuing flush */
  2501. if (card->host->areq)
  2502. mmc_blk_issue_rw_rq(mq, NULL);
  2503. ret = mmc_blk_issue_flush(mq, req);
  2504. } else {
  2505. if (!req && host->areq) {
  2506. spin_lock_irqsave(&host->context_info.lock, flags);
  2507. host->context_info.is_waiting_last_req = true;
  2508. spin_unlock_irqrestore(&host->context_info.lock, flags);
  2509. }
  2510. ret = mmc_blk_issue_rw_rq(mq, req);
  2511. }
  2512. out:
  2513. /*
2514. * The packet burst is over when one of the following occurs:
  2515. * - no more requests and new request notification is not in progress
  2516. * - urgent notification in progress and current request is not urgent
  2517. * (all existing requests completed or reinserted to the block layer)
  2518. */
  2519. if ((!req && !(test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags))) ||
  2520. (cmd_flags & MMC_REQ_SPECIAL_MASK) ||
  2521. ((test_bit(MMC_QUEUE_URGENT_REQUEST, &mq->flags)) &&
  2522. !(cmd_flags & MMC_REQ_NOREINSERT_MASK))) {
  2523. if (mmc_card_need_bkops(card))
  2524. mmc_start_bkops(card, false);
  2525. /* release host only when there are no more requests */
  2526. mmc_release_host(card->host);
  2527. mmc_rpm_release(host, &card->dev);
  2528. }
  2529. return ret;
  2530. }
  2531. static inline int mmc_blk_readonly(struct mmc_card *card)
  2532. {
  2533. return mmc_card_readonly(card) ||
  2534. !(card->csd.cmdclass & CCC_BLOCK_WRITE);
  2535. }
  2536. static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
  2537. struct device *parent,
  2538. sector_t size,
  2539. bool default_ro,
  2540. const char *subname,
  2541. int area_type)
  2542. {
  2543. struct mmc_blk_data *md;
  2544. int devidx, ret;
  2545. unsigned int percentage =
  2546. BKOPS_SIZE_PERCENTAGE_TO_QUEUE_DELAYED_WORK;
  2547. devidx = find_first_zero_bit(dev_use, max_devices);
  2548. if (devidx >= max_devices)
  2549. return ERR_PTR(-ENOSPC);
  2550. __set_bit(devidx, dev_use);
  2551. md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL);
  2552. if (!md) {
  2553. ret = -ENOMEM;
  2554. goto out;
  2555. }
  2556. /*
  2557. * !subname implies we are creating main mmc_blk_data that will be
  2558. * associated with mmc_card with mmc_set_drvdata. Due to device
  2559. * partitions, devidx will not coincide with a per-physical card
  2560. * index anymore so we keep track of a name index.
  2561. */
  2562. if (!subname) {
  2563. md->name_idx = find_first_zero_bit(name_use, max_devices);
  2564. __set_bit(md->name_idx, name_use);
  2565. } else
  2566. md->name_idx = ((struct mmc_blk_data *)
  2567. dev_to_disk(parent)->private_data)->name_idx;
  2568. md->area_type = area_type;
  2569. /*
  2570. * Set the read-only status based on the supported commands
  2571. * and the write protect switch.
  2572. */
  2573. md->read_only = mmc_blk_readonly(card);
  2574. md->disk = alloc_disk(perdev_minors);
  2575. if (md->disk == NULL) {
  2576. ret = -ENOMEM;
  2577. goto err_kfree;
  2578. }
  2579. spin_lock_init(&md->lock);
  2580. INIT_LIST_HEAD(&md->part);
  2581. md->usage = 1;
  2582. ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
  2583. if (ret)
  2584. goto err_putdisk;
  2585. md->queue.issue_fn = mmc_blk_issue_rq;
  2586. md->queue.data = md;
  2587. md->disk->major = MMC_BLOCK_MAJOR;
  2588. md->disk->first_minor = devidx * perdev_minors;
  2589. md->disk->fops = &mmc_bdops;
  2590. md->disk->private_data = md;
  2591. md->disk->queue = md->queue.queue;
  2592. md->disk->driverfs_dev = parent;
  2593. set_disk_ro(md->disk, md->read_only || default_ro);
  2594. md->disk->flags = GENHD_FL_EXT_DEVT;
  2595. if (area_type & MMC_BLK_DATA_AREA_RPMB)
  2596. md->disk->flags |= GENHD_FL_NO_PART_SCAN;
  2597. /*
  2598. * As discussed on lkml, GENHD_FL_REMOVABLE should:
  2599. *
  2600. * - be set for removable media with permanent block devices
  2601. * - be unset for removable block devices with permanent media
  2602. *
  2603. * Since MMC block devices clearly fall under the second
  2604. * case, we do not set GENHD_FL_REMOVABLE. Userspace
  2605. * should use the block device creation/destruction hotplug
  2606. * messages to tell when the card is present.
  2607. */
  2608. snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
  2609. "mmcblk%d%s", md->name_idx, subname ? subname : "");
  2610. blk_queue_logical_block_size(md->queue.queue, 512);
  2611. set_capacity(md->disk, size);
  2612. card->bkops_info.size_percentage_to_queue_delayed_work = percentage;
  2613. card->bkops_info.min_sectors_to_queue_delayed_work =
  2614. ((unsigned int)size * percentage) / 100;
  2615. if (mmc_host_cmd23(card->host)) {
  2616. if (mmc_card_mmc(card) ||
  2617. (mmc_card_sd(card) &&
  2618. card->scr.cmds & SD_SCR_CMD23_SUPPORT &&
  2619. mmc_sd_card_uhs(card)))
  2620. md->flags |= MMC_BLK_CMD23;
  2621. }
  2622. if (mmc_card_mmc(card) &&
  2623. md->flags & MMC_BLK_CMD23 &&
  2624. ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
  2625. card->ext_csd.rel_sectors)) {
  2626. md->flags |= MMC_BLK_REL_WR;
  2627. blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
  2628. }
  2629. return md;
  2630. err_putdisk:
  2631. put_disk(md->disk);
  2632. err_kfree:
  2633. if (!subname)
  2634. __clear_bit(md->name_idx, name_use);
  2635. kfree(md);
  2636. out:
  2637. __clear_bit(devidx, dev_use);
  2638. return ERR_PTR(ret);
  2639. }
  2640. static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
  2641. {
  2642. sector_t size;
  2643. struct mmc_blk_data *md;
  2644. if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
  2645. /*
2646. * The EXT_CSD sector count is in number of 512 byte
  2647. * sectors.
  2648. */
  2649. size = card->ext_csd.sectors;
  2650. } else {
  2651. /*
  2652. * The CSD capacity field is in units of read_blkbits.
  2653. * set_capacity takes units of 512 bytes.
  2654. */
  2655. size = card->csd.capacity << (card->csd.read_blkbits - 9);
  2656. }
  2657. md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
  2658. MMC_BLK_DATA_AREA_MAIN);
  2659. return md;
  2660. }
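/*
 * Compiled-out sketch of the capacity selection above: block-addressed MMCs
 * report the sector count directly via EXT_CSD, while byte-addressed cards
 * report a CSD capacity in units of read_blkbits that is converted to 512-byte
 * sectors (read_blkbits assumed >= 9). Inputs are arbitrary.
 */
#if 0
static unsigned long long example_capacity_sectors(int blockaddr,
						   unsigned long long ext_csd_sectors,
						   unsigned long long csd_capacity,
						   unsigned int read_blkbits)
{
	if (blockaddr)
		return ext_csd_sectors;		/* already 512-byte sectors */

	/* e.g. capacity 0xF424, read_blkbits 11 -> 0xF424 << 2 sectors */
	return csd_capacity << (read_blkbits - 9);
}
#endif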
  2661. static int mmc_blk_alloc_part(struct mmc_card *card,
  2662. struct mmc_blk_data *md,
  2663. unsigned int part_type,
  2664. sector_t size,
  2665. bool default_ro,
  2666. const char *subname,
  2667. int area_type)
  2668. {
  2669. char cap_str[10];
  2670. struct mmc_blk_data *part_md;
  2671. part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
  2672. subname, area_type);
  2673. if (IS_ERR(part_md))
  2674. return PTR_ERR(part_md);
  2675. part_md->part_type = part_type;
  2676. list_add(&part_md->part, &md->part);
  2677. string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
  2678. cap_str, sizeof(cap_str));
  2679. pr_info("%s: %s %s partition %u %s\n",
  2680. part_md->disk->disk_name, mmc_card_id(card),
  2681. mmc_card_name(card), part_md->part_type, cap_str);
  2682. return 0;
  2683. }
  2684. /* MMC Physical partitions consist of two boot partitions and
  2685. * up to four general purpose partitions.
2686. * For each partition enabled in EXT_CSD a block device will be allocated
  2687. * to provide access to the partition.
  2688. */
  2689. static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
  2690. {
  2691. int idx, ret = 0;
  2692. if (!mmc_card_mmc(card))
  2693. return 0;
  2694. for (idx = 0; idx < card->nr_parts; idx++) {
  2695. if (card->part[idx].size) {
  2696. ret = mmc_blk_alloc_part(card, md,
  2697. card->part[idx].part_cfg,
  2698. card->part[idx].size >> 9,
  2699. card->part[idx].force_ro,
  2700. card->part[idx].name,
  2701. card->part[idx].area_type);
  2702. if (ret)
  2703. return ret;
  2704. }
  2705. }
  2706. return ret;
  2707. }
  2708. static void mmc_blk_remove_req(struct mmc_blk_data *md)
  2709. {
  2710. struct mmc_card *card;
  2711. if (md) {
  2712. card = md->queue.card;
  2713. device_remove_file(disk_to_dev(md->disk),
  2714. &md->num_wr_reqs_to_start_packing);
  2715. if (md->disk->flags & GENHD_FL_UP) {
  2716. device_remove_file(disk_to_dev(md->disk), &md->force_ro);
  2717. if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
  2718. card->ext_csd.boot_ro_lockable)
  2719. device_remove_file(disk_to_dev(md->disk),
  2720. &md->power_ro_lock);
  2721. /* Stop new requests from getting into the queue */
  2722. del_gendisk(md->disk);
  2723. }
  2724. /* Then flush out any already in there */
  2725. mmc_cleanup_queue(&md->queue);
  2726. mmc_blk_put(md);
  2727. }
  2728. }
  2729. static void mmc_blk_remove_parts(struct mmc_card *card,
  2730. struct mmc_blk_data *md)
  2731. {
  2732. struct list_head *pos, *q;
  2733. struct mmc_blk_data *part_md;
  2734. __clear_bit(md->name_idx, name_use);
  2735. list_for_each_safe(pos, q, &md->part) {
  2736. part_md = list_entry(pos, struct mmc_blk_data, part);
  2737. list_del(pos);
  2738. mmc_blk_remove_req(part_md);
  2739. }
  2740. }
static int mmc_add_disk(struct mmc_blk_data *md)
{
        int ret;
        struct mmc_card *card = md->queue.card;

        add_disk(md->disk);
        md->force_ro.show = force_ro_show;
        md->force_ro.store = force_ro_store;
        sysfs_attr_init(&md->force_ro.attr);
        md->force_ro.attr.name = "force_ro";
        md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
        ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
        if (ret)
                goto force_ro_fail;

        if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
             card->ext_csd.boot_ro_lockable) {
                umode_t mode;

                if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
                        mode = S_IRUGO;
                else
                        mode = S_IRUGO | S_IWUSR;

                md->power_ro_lock.show = power_ro_lock_show;
                md->power_ro_lock.store = power_ro_lock_store;
                sysfs_attr_init(&md->power_ro_lock.attr);
                md->power_ro_lock.attr.mode = mode;
                md->power_ro_lock.attr.name =
                                "ro_lock_until_next_power_on";
                ret = device_create_file(disk_to_dev(md->disk),
                                         &md->power_ro_lock);
                if (ret)
                        goto power_ro_lock_fail;
        }

        md->num_wr_reqs_to_start_packing.show =
                num_wr_reqs_to_start_packing_show;
        md->num_wr_reqs_to_start_packing.store =
                num_wr_reqs_to_start_packing_store;
        sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
        md->num_wr_reqs_to_start_packing.attr.name =
                "num_wr_reqs_to_start_packing";
        md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
        ret = device_create_file(disk_to_dev(md->disk),
                                 &md->num_wr_reqs_to_start_packing);
        if (ret)
                goto num_wr_reqs_to_start_packing_fail;

        md->bkops_check_threshold.show = bkops_check_threshold_show;
        md->bkops_check_threshold.store = bkops_check_threshold_store;
        sysfs_attr_init(&md->bkops_check_threshold.attr);
        md->bkops_check_threshold.attr.name = "bkops_check_threshold";
        md->bkops_check_threshold.attr.mode = S_IRUGO | S_IWUSR;
        ret = device_create_file(disk_to_dev(md->disk),
                                 &md->bkops_check_threshold);
        if (ret)
                goto bkops_check_threshold_fails;

        md->no_pack_for_random.show = no_pack_for_random_show;
        md->no_pack_for_random.store = no_pack_for_random_store;
        sysfs_attr_init(&md->no_pack_for_random.attr);
        md->no_pack_for_random.attr.name = "no_pack_for_random";
        md->no_pack_for_random.attr.mode = S_IRUGO | S_IWUSR;
        ret = device_create_file(disk_to_dev(md->disk),
                                 &md->no_pack_for_random);
        if (ret)
                goto no_pack_for_random_fails;

        return ret;

no_pack_for_random_fails:
        device_remove_file(disk_to_dev(md->disk),
                           &md->bkops_check_threshold);
bkops_check_threshold_fails:
        device_remove_file(disk_to_dev(md->disk),
                           &md->num_wr_reqs_to_start_packing);
num_wr_reqs_to_start_packing_fail:
        device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
power_ro_lock_fail:
        device_remove_file(disk_to_dev(md->disk), &md->force_ro);
force_ro_fail:
        del_gendisk(md->disk);

        return ret;
}
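
/*
 * Card-specific quirks keyed on the CID.  Each MMC_FIXUP() entry
 * matches on product name, manufacturer ID and OEM ID (CID_NAME_ANY
 * and CID_OEMID_ANY act as wildcards) and, when it matches, runs the
 * given hook (add_quirk / add_quirk_mmc) to set the listed quirk flag
 * on the card.  mmc_blk_probe() applies this table through
 * mmc_fixup_device().
 */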
#define CID_MANFID_SANDISK      0x2
#define CID_MANFID_TOSHIBA      0x11
#define CID_MANFID_MICRON       0x13
#define CID_MANFID_SAMSUNG      0x15
#define CID_MANFID_HYNIX        0x90

static const struct mmc_fixup blk_fixups[] =
{
        MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
                  MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
                  MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
                  MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
                  MMC_QUIRK_INAND_CMD38),
        MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
                  MMC_QUIRK_INAND_CMD38),

        /*
         * Some MMC cards experience performance degradation with CMD23
         * instead of CMD12-bounded multiblock transfers.  For now we
         * blacklist what is known to be bad:
         * - certain Toshiba cards.
         *
         * N.B. This does not affect SD cards.
         */
        MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
        MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
        MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),

        /*
         * Some Micron MMC cards need a longer data read timeout than
         * indicated in the CSD.
         */
        MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
                  MMC_QUIRK_LONG_READ_TIME),

        /* Some iNAND MCP devices advertise incorrect timeout values. */
        MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_INAND_DATA_TIMEOUT),

        /*
         * On these Samsung MoviNAND parts, performing secure erase or
         * secure trim can result in unrecoverable corruption due to a
         * firmware bug.
         */
        MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
        MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
        MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
        MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
        MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
        MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
        MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
        MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),

        MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BROKEN_DATA_TIMEOUT),

        END_FIXUP
};
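
/*
 * Optional "bkops_en" sysfs node: reading it reports the cached
 * card->bkops_enable value, writing a value hands it to
 * mmc_bkops_enable() to turn background operations on or off.  Like
 * the other attributes it hangs off the disk device, so it would
 * typically be read with something like
 * "cat /sys/block/mmcblk0/bkops_en" (illustrative path).
 */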
#ifdef CONFIG_MMC_SUPPORT_BKOPS_MODE
static ssize_t bkops_mode_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct gendisk *disk;
        struct mmc_blk_data *md;
        struct mmc_card *card;

        disk = dev_to_disk(dev);
        if (disk)
                md = disk->private_data;
        else
                goto show_out;

        if (md)
                card = md->queue.card;
        else
                goto show_out;

        return snprintf(buf, PAGE_SIZE, "%u\n", card->bkops_enable);

show_out:
        return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t bkops_mode_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct gendisk *disk;
        struct mmc_blk_data *md;
        struct mmc_card *card;
        u8 value;
        int err = 0;

        disk = dev_to_disk(dev);
        if (disk)
                md = disk->private_data;
        else
                goto store_out;

        if (md)
                card = md->queue.card;
        else
                goto store_out;

        if (kstrtou8(buf, 0, &value))
                goto store_out;

        err = mmc_bkops_enable(card->host, value);
        if (err)
                return err;

        return count;

store_out:
        return -EINVAL;
}

static inline void mmc_blk_bkops_sysfs_init(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        card->bkops_attr.show = bkops_mode_show;
        card->bkops_attr.store = bkops_mode_store;
        sysfs_attr_init(&card->bkops_attr.attr);
        card->bkops_attr.attr.name = "bkops_en";
        card->bkops_attr.attr.mode = S_IRUGO | S_IWUSR | S_IWGRP;

        if (device_create_file((disk_to_dev(md->disk)), &card->bkops_attr)) {
                pr_err("%s: Failed to create bkops_en sysfs entry\n",
                                mmc_hostname(card->host));
#if defined(CONFIG_MMC_BKOPS_NODE_UID) || defined(CONFIG_MMC_BKOPS_NODE_GID)
        } else {
                int rc;
                struct device *dev;

                dev = disk_to_dev(md->disk);
                rc = sysfs_chown_file(&dev->kobj, &card->bkops_attr.attr,
                                      CONFIG_MMC_BKOPS_NODE_UID,
                                      CONFIG_MMC_BKOPS_NODE_GID);
                if (rc)
                        pr_err("%s: Failed to change ownership of bkops_en sysfs entry\n",
                                        mmc_hostname(card->host));
#endif
        }
}
#else
static inline void mmc_blk_bkops_sysfs_init(struct mmc_card *card)
{
}
#endif
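
/*
 * Driver probe: bail out early if the card does not support block
 * reads, allocate the main mmc_blk_data plus any boot/GP partitions,
 * apply the quirk table, register the gendisks and, for eMMC, set up
 * the BKOPS sysfs node.  Note that failures after allocation tear the
 * disks down again but still return 0.
 */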
static int mmc_blk_probe(struct mmc_card *card)
{
        struct mmc_blk_data *md, *part_md;
        char cap_str[10];

        /*
         * Check that the card supports the command class(es) we need.
         */
        if (!(card->csd.cmdclass & CCC_BLOCK_READ))
                return -ENODEV;

        md = mmc_blk_alloc(card);
        if (IS_ERR(md))
                return PTR_ERR(md);

        string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
                        cap_str, sizeof(cap_str));
        pr_info("[%s]%s: %s %s %s %s, card->type:%d\n", __func__,
                md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
                cap_str, md->read_only ? "(ro)" : "", card->type);

        if (mmc_blk_alloc_parts(card, md))
                goto out;

        mmc_set_drvdata(card, md);
        mmc_fixup_device(card, blk_fixups);

#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
        /* Apply deferred resume only to MMC cards; SD still needs more verification. */
        if (card && mmc_card_mmc(card))
                mmc_set_bus_resume_policy(card->host, 1);
#endif
        if (mmc_add_disk(md))
                goto out;

        list_for_each_entry(part_md, &md->part, part) {
                if (mmc_add_disk(part_md))
                        goto out;
        }

        /* Initialise the sysfs node for BKOPS mode. */
        if (card && mmc_card_mmc(card)) {
                mmc_blk_bkops_sysfs_init(card);
                spin_lock_init(&card->bkops_lock);
        }

        return 0;

 out:
        mmc_blk_remove_parts(card, md);
        mmc_blk_remove_req(md);
        return 0;
}
static void mmc_blk_remove(struct mmc_card *card)
{
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        mmc_blk_remove_parts(card, md);
        mmc_claim_host(card->host);
        mmc_blk_part_switch(card, md);
        mmc_release_host(card->host);
        mmc_blk_remove_req(md);
        mmc_set_drvdata(card, NULL);
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
        mmc_set_bus_resume_policy(card->host, 0);
#endif
}

static void mmc_blk_shutdown(struct mmc_card *card)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);
        int rc;

        /* Silence the block layer. */
        if (md) {
                rc = mmc_queue_suspend(&md->queue, 1);
                if (rc)
                        goto suspend_error;
                list_for_each_entry(part_md, &md->part, part) {
                        rc = mmc_queue_suspend(&part_md->queue, 1);
                        if (rc)
                                goto suspend_error;
                }
        }

        /* Send the power-off notification. */
        if (mmc_card_mmc(card)) {
                mmc_rpm_hold(card->host, &card->dev);
                mmc_claim_host(card->host);
                mmc_stop_bkops(card);
                mmc_release_host(card->host);
                mmc_send_pon(card);
                mmc_rpm_release(card->host, &card->dev);
        }
        return;

suspend_error:
        pr_err("%s: mmc_queue_suspend returned error = %d\n",
               mmc_hostname(card->host), rc);
}
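
/*
 * System suspend/resume: suspend stops the request queues of the main
 * disk and every partition (rolling back if one of them fails); resume
 * restarts them and resets the current partition to the main user
 * area, since the card comes back from suspend in its default state.
 */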
#ifdef CONFIG_PM
static int mmc_blk_suspend(struct mmc_card *card)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);
        int rc = 0;

        if (md) {
                rc = mmc_queue_suspend(&md->queue, 0);
                if (rc)
                        goto out;
                list_for_each_entry(part_md, &md->part, part) {
                        rc = mmc_queue_suspend(&part_md->queue, 0);
                        if (rc)
                                goto out_resume;
                }
        }
        goto out;

 out_resume:
        mmc_queue_resume(&md->queue);
        list_for_each_entry(part_md, &md->part, part) {
                mmc_queue_resume(&part_md->queue);
        }
 out:
        return rc;
}

static int mmc_blk_resume(struct mmc_card *card)
{
        struct mmc_blk_data *part_md;
        struct mmc_blk_data *md = mmc_get_drvdata(card);

        if (md) {
                /*
                 * Resume involves the card going into idle state,
                 * so current partition is always the main one.
                 */
                md->part_curr = md->part_type;
                mmc_queue_resume(&md->queue);
                list_for_each_entry(part_md, &md->part, part) {
                        mmc_queue_resume(&part_md->queue);
                }
        }
        return 0;
}
#else
#define mmc_blk_suspend NULL
#define mmc_blk_resume  NULL
#endif
static struct mmc_driver mmc_driver = {
        .drv            = {
                .name   = "mmcblk",
        },
        .probe          = mmc_blk_probe,
        .remove         = mmc_blk_remove,
        .suspend        = mmc_blk_suspend,
        .resume         = mmc_blk_resume,
        .shutdown       = mmc_blk_shutdown,
};
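
/*
 * Module init/exit: claim MMC_BLOCK_MAJOR with the block layer and
 * register the mmc_driver above.  perdev_minors (defaulting to
 * CONFIG_MMC_BLOCK_MINORS, and normally adjustable earlier in this
 * file) decides how many minor numbers each card gets, which caps the
 * number of devices at 256 / perdev_minors.
 */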
static int __init mmc_blk_init(void)
{
        int res;

        if (perdev_minors != CONFIG_MMC_BLOCK_MINORS)
                pr_info("mmcblk: using %d minors per device\n",
                        perdev_minors);

        max_devices = 256 / perdev_minors;

        res = register_blkdev(MMC_BLOCK_MAJOR, "mmc");
        if (res)
                goto out;

        res = mmc_register_driver(&mmc_driver);
        if (res)
                goto out2;

        return 0;
 out2:
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
 out:
        return res;
}

static void __exit mmc_blk_exit(void)
{
        mmc_unregister_driver(&mmc_driver);
        unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
}

module_init(mmc_blk_init);
module_exit(mmc_blk_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");