omap-gpmc.c

  1. /*
  2. * GPMC support functions
  3. *
  4. * Copyright (C) 2005-2006 Nokia Corporation
  5. *
  6. * Author: Juha Yrjola
  7. *
  8. * Copyright (C) 2009 Texas Instruments
  9. * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License version 2 as
  13. * published by the Free Software Foundation.
  14. */
  15. #include <linux/irq.h>
  16. #include <linux/kernel.h>
  17. #include <linux/init.h>
  18. #include <linux/err.h>
  19. #include <linux/clk.h>
  20. #include <linux/ioport.h>
  21. #include <linux/spinlock.h>
  22. #include <linux/io.h>
  23. #include <linux/module.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/platform_device.h>
  26. #include <linux/of.h>
  27. #include <linux/of_address.h>
  28. #include <linux/of_mtd.h>
  29. #include <linux/of_device.h>
  30. #include <linux/of_platform.h>
  31. #include <linux/omap-gpmc.h>
  32. #include <linux/mtd/nand.h>
  33. #include <linux/pm_runtime.h>
  34. #include <linux/platform_data/mtd-nand-omap2.h>
  35. #include <linux/platform_data/mtd-onenand-omap2.h>
  36. #include <asm/mach-types.h>
  37. #define DEVICE_NAME "omap-gpmc"
  38. /* GPMC register offsets */
  39. #define GPMC_REVISION 0x00
  40. #define GPMC_SYSCONFIG 0x10
  41. #define GPMC_SYSSTATUS 0x14
  42. #define GPMC_IRQSTATUS 0x18
  43. #define GPMC_IRQENABLE 0x1c
  44. #define GPMC_TIMEOUT_CONTROL 0x40
  45. #define GPMC_ERR_ADDRESS 0x44
  46. #define GPMC_ERR_TYPE 0x48
  47. #define GPMC_CONFIG 0x50
  48. #define GPMC_STATUS 0x54
  49. #define GPMC_PREFETCH_CONFIG1 0x1e0
  50. #define GPMC_PREFETCH_CONFIG2 0x1e4
  51. #define GPMC_PREFETCH_CONTROL 0x1ec
  52. #define GPMC_PREFETCH_STATUS 0x1f0
  53. #define GPMC_ECC_CONFIG 0x1f4
  54. #define GPMC_ECC_CONTROL 0x1f8
  55. #define GPMC_ECC_SIZE_CONFIG 0x1fc
  56. #define GPMC_ECC1_RESULT 0x200
  57. #define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */
  58. #define GPMC_ECC_BCH_RESULT_1 0x244 /* not available on OMAP2 */
  59. #define GPMC_ECC_BCH_RESULT_2 0x248 /* not available on OMAP2 */
  60. #define GPMC_ECC_BCH_RESULT_3 0x24c /* not available on OMAP2 */
  61. #define GPMC_ECC_BCH_RESULT_4 0x300 /* not available on OMAP2 */
  62. #define GPMC_ECC_BCH_RESULT_5 0x304 /* not available on OMAP2 */
  63. #define GPMC_ECC_BCH_RESULT_6 0x308 /* not available on OMAP2 */
  64. /* GPMC ECC control settings */
  65. #define GPMC_ECC_CTRL_ECCCLEAR 0x100
  66. #define GPMC_ECC_CTRL_ECCDISABLE 0x000
  67. #define GPMC_ECC_CTRL_ECCREG1 0x001
  68. #define GPMC_ECC_CTRL_ECCREG2 0x002
  69. #define GPMC_ECC_CTRL_ECCREG3 0x003
  70. #define GPMC_ECC_CTRL_ECCREG4 0x004
  71. #define GPMC_ECC_CTRL_ECCREG5 0x005
  72. #define GPMC_ECC_CTRL_ECCREG6 0x006
  73. #define GPMC_ECC_CTRL_ECCREG7 0x007
  74. #define GPMC_ECC_CTRL_ECCREG8 0x008
  75. #define GPMC_ECC_CTRL_ECCREG9 0x009
  76. #define GPMC_CONFIG_LIMITEDADDRESS BIT(1)
  77. #define GPMC_CONFIG2_CSEXTRADELAY BIT(7)
  78. #define GPMC_CONFIG3_ADVEXTRADELAY BIT(7)
  79. #define GPMC_CONFIG4_OEEXTRADELAY BIT(7)
  80. #define GPMC_CONFIG4_WEEXTRADELAY BIT(23)
  81. #define GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN BIT(6)
  82. #define GPMC_CONFIG6_CYCLE2CYCLESAMECSEN BIT(7)
  83. #define GPMC_CS0_OFFSET 0x60
  84. #define GPMC_CS_SIZE 0x30
  85. #define GPMC_BCH_SIZE 0x10
  86. #define GPMC_MEM_END 0x3FFFFFFF
  87. #define GPMC_CHUNK_SHIFT 24 /* 16 MB */
  88. #define GPMC_SECTION_SHIFT 28 /* 256 MB */
  89. #define CS_NUM_SHIFT 24
  90. #define ENABLE_PREFETCH (0x1 << 7)
  91. #define DMA_MPU_MODE 2
  92. #define GPMC_REVISION_MAJOR(l) ((l >> 4) & 0xf)
  93. #define GPMC_REVISION_MINOR(l) (l & 0xf)
  94. #define GPMC_HAS_WR_ACCESS 0x1
  95. #define GPMC_HAS_WR_DATA_MUX_BUS 0x2
  96. #define GPMC_HAS_MUX_AAD 0x4
  97. #define GPMC_NR_WAITPINS 4
  98. #define GPMC_CS_CONFIG1 0x00
  99. #define GPMC_CS_CONFIG2 0x04
  100. #define GPMC_CS_CONFIG3 0x08
  101. #define GPMC_CS_CONFIG4 0x0c
  102. #define GPMC_CS_CONFIG5 0x10
  103. #define GPMC_CS_CONFIG6 0x14
  104. #define GPMC_CS_CONFIG7 0x18
  105. #define GPMC_CS_NAND_COMMAND 0x1c
  106. #define GPMC_CS_NAND_ADDRESS 0x20
  107. #define GPMC_CS_NAND_DATA 0x24
  108. /* Control Commands */
  109. #define GPMC_CONFIG_RDY_BSY 0x00000001
  110. #define GPMC_CONFIG_DEV_SIZE 0x00000002
  111. #define GPMC_CONFIG_DEV_TYPE 0x00000003
  112. #define GPMC_SET_IRQ_STATUS 0x00000004
  113. #define GPMC_CONFIG1_WRAPBURST_SUPP (1 << 31)
  114. #define GPMC_CONFIG1_READMULTIPLE_SUPP (1 << 30)
  115. #define GPMC_CONFIG1_READTYPE_ASYNC (0 << 29)
  116. #define GPMC_CONFIG1_READTYPE_SYNC (1 << 29)
  117. #define GPMC_CONFIG1_WRITEMULTIPLE_SUPP (1 << 28)
  118. #define GPMC_CONFIG1_WRITETYPE_ASYNC (0 << 27)
  119. #define GPMC_CONFIG1_WRITETYPE_SYNC (1 << 27)
  120. #define GPMC_CONFIG1_CLKACTIVATIONTIME(val) ((val & 3) << 25)
  121. /** CLKACTIVATIONTIME Max Ticks */
  122. #define GPMC_CONFIG1_CLKACTIVATIONTIME_MAX 2
  123. #define GPMC_CONFIG1_PAGE_LEN(val) ((val & 3) << 23)
  124. /** ATTACHEDDEVICEPAGELENGTH Max Value */
  125. #define GPMC_CONFIG1_ATTACHEDDEVICEPAGELENGTH_MAX 2
  126. #define GPMC_CONFIG1_WAIT_READ_MON (1 << 22)
  127. #define GPMC_CONFIG1_WAIT_WRITE_MON (1 << 21)
  128. #define GPMC_CONFIG1_WAIT_MON_TIME(val) ((val & 3) << 18)
  129. /** WAITMONITORINGTIME Max Ticks */
  130. #define GPMC_CONFIG1_WAITMONITORINGTIME_MAX 2
  131. #define GPMC_CONFIG1_WAIT_PIN_SEL(val) ((val & 3) << 16)
  132. #define GPMC_CONFIG1_DEVICESIZE(val) ((val & 3) << 12)
  133. #define GPMC_CONFIG1_DEVICESIZE_16 GPMC_CONFIG1_DEVICESIZE(1)
  134. /** DEVICESIZE Max Value */
  135. #define GPMC_CONFIG1_DEVICESIZE_MAX 1
  136. #define GPMC_CONFIG1_DEVICETYPE(val) ((val & 3) << 10)
  137. #define GPMC_CONFIG1_DEVICETYPE_NOR GPMC_CONFIG1_DEVICETYPE(0)
  138. #define GPMC_CONFIG1_MUXTYPE(val) ((val & 3) << 8)
  139. #define GPMC_CONFIG1_TIME_PARA_GRAN (1 << 4)
  140. #define GPMC_CONFIG1_FCLK_DIV(val) (val & 3)
  141. #define GPMC_CONFIG1_FCLK_DIV2 (GPMC_CONFIG1_FCLK_DIV(1))
  142. #define GPMC_CONFIG1_FCLK_DIV3 (GPMC_CONFIG1_FCLK_DIV(2))
  143. #define GPMC_CONFIG1_FCLK_DIV4 (GPMC_CONFIG1_FCLK_DIV(3))
  144. #define GPMC_CONFIG7_CSVALID (1 << 6)
  145. #define GPMC_CONFIG7_BASEADDRESS_MASK 0x3f
  146. #define GPMC_CONFIG7_CSVALID_MASK BIT(6)
  147. #define GPMC_CONFIG7_MASKADDRESS_OFFSET 8
  148. #define GPMC_CONFIG7_MASKADDRESS_MASK (0xf << GPMC_CONFIG7_MASKADDRESS_OFFSET)
  149. /* All CONFIG7 bits except reserved bits */
  150. #define GPMC_CONFIG7_MASK (GPMC_CONFIG7_BASEADDRESS_MASK | \
  151. GPMC_CONFIG7_CSVALID_MASK | \
  152. GPMC_CONFIG7_MASKADDRESS_MASK)
  153. #define GPMC_DEVICETYPE_NOR 0
  154. #define GPMC_DEVICETYPE_NAND 2
  155. #define GPMC_CONFIG_WRITEPROTECT 0x00000010
  156. #define WR_RD_PIN_MONITORING 0x00600000
  157. #define GPMC_ENABLE_IRQ 0x0000000d
  158. /* ECC commands */
  159. #define GPMC_ECC_READ 0 /* Reset Hardware ECC for read */
  160. #define GPMC_ECC_WRITE 1 /* Reset Hardware ECC for write */
  161. #define GPMC_ECC_READSYN 2 /* Reset before syndrome is read back */
  162. /* XXX: Only NAND irq has been considered; currently these are the only ones used
  163. */
  164. #define GPMC_NR_IRQ 2
  165. enum gpmc_clk_domain {
  166. GPMC_CD_FCLK,
  167. GPMC_CD_CLK
  168. };
  169. struct gpmc_cs_data {
  170. const char *name;
  171. #define GPMC_CS_RESERVED (1 << 0)
  172. u32 flags;
  173. struct resource mem;
  174. };
  175. struct gpmc_client_irq {
  176. unsigned irq;
  177. u32 bitmask;
  178. };
  179. /* Structure to save gpmc cs context */
  180. struct gpmc_cs_config {
  181. u32 config1;
  182. u32 config2;
  183. u32 config3;
  184. u32 config4;
  185. u32 config5;
  186. u32 config6;
  187. u32 config7;
  188. int is_valid;
  189. };
  190. /*
  191. * Structure to save/restore gpmc context
  192. * to support core off on OMAP3
  193. */
  194. struct omap3_gpmc_regs {
  195. u32 sysconfig;
  196. u32 irqenable;
  197. u32 timeout_ctrl;
  198. u32 config;
  199. u32 prefetch_config1;
  200. u32 prefetch_config2;
  201. u32 prefetch_control;
  202. struct gpmc_cs_config cs_context[GPMC_CS_NUM];
  203. };
  204. static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
  205. static struct irq_chip gpmc_irq_chip;
  206. static int gpmc_irq_start;
  207. static struct resource gpmc_mem_root;
  208. static struct gpmc_cs_data gpmc_cs[GPMC_CS_NUM];
  209. static DEFINE_SPINLOCK(gpmc_mem_lock);
  210. /* Define chip-selects as reserved by default until probe completes */
  211. static unsigned int gpmc_cs_num = GPMC_CS_NUM;
  212. static unsigned int gpmc_nr_waitpins;
  213. static struct device *gpmc_dev;
  214. static int gpmc_irq;
  215. static resource_size_t phys_base, mem_size;
  216. static unsigned gpmc_capability;
  217. static void __iomem *gpmc_base;
  218. static struct clk *gpmc_l3_clk;
  219. static irqreturn_t gpmc_handle_irq(int irq, void *dev);
  220. static void gpmc_write_reg(int idx, u32 val)
  221. {
  222. writel_relaxed(val, gpmc_base + idx);
  223. }
  224. static u32 gpmc_read_reg(int idx)
  225. {
  226. return readl_relaxed(gpmc_base + idx);
  227. }
  228. void gpmc_cs_write_reg(int cs, int idx, u32 val)
  229. {
  230. void __iomem *reg_addr;
  231. reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
  232. writel_relaxed(val, reg_addr);
  233. }
  234. static u32 gpmc_cs_read_reg(int cs, int idx)
  235. {
  236. void __iomem *reg_addr;
  237. reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
  238. return readl_relaxed(reg_addr);
  239. }
  240. /* TODO: Add support for gpmc_fck to clock framework and use it */
  241. static unsigned long gpmc_get_fclk_period(void)
  242. {
  243. unsigned long rate = clk_get_rate(gpmc_l3_clk);
  244. rate /= 1000;
  245. rate = 1000000000 / rate; /* In picoseconds */
  246. return rate;
  247. }
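/*
 * Illustrative example (not from the original source): with an assumed
 * 100 MHz L3 clock, clk_get_rate() returns 100000000, rate /= 1000 gives
 * 100000 kHz, and 1000000000 / 100000 = 10000 ps, i.e. a 10 ns GPMC_FCLK
 * period. The worked examples further down assume this same 10 ns period.
 */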
  248. /**
  249. * gpmc_get_clk_period - get period of selected clock domain in ps
  250. * @cs: Chip Select Region.
  251. * @cd: Clock Domain.
  252. *
  253. * GPMC_CS_CONFIG1 GPMCFCLKDIVIDER for cs has to be setup
  254. * prior to calling this function with GPMC_CD_CLK.
  255. */
  256. static unsigned long gpmc_get_clk_period(int cs, enum gpmc_clk_domain cd)
  257. {
  258. unsigned long tick_ps = gpmc_get_fclk_period();
  259. u32 l;
  260. int div;
  261. switch (cd) {
  262. case GPMC_CD_CLK:
  263. /* get current clk divider */
  264. l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
  265. div = (l & 0x03) + 1;
  266. /* get GPMC_CLK period */
  267. tick_ps *= div;
  268. break;
  269. case GPMC_CD_FCLK:
  270. /* FALL-THROUGH */
  271. default:
  272. break;
  273. }
  274. return tick_ps;
  275. }
  276. static unsigned int gpmc_ns_to_clk_ticks(unsigned int time_ns, int cs,
  277. enum gpmc_clk_domain cd)
  278. {
  279. unsigned long tick_ps;
  280. /* Calculate in picosecs to yield more exact results */
  281. tick_ps = gpmc_get_clk_period(cs, cd);
  282. return (time_ns * 1000 + tick_ps - 1) / tick_ps;
  283. }
  284. static unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
  285. {
  286. return gpmc_ns_to_clk_ticks(time_ns, /* any CS */ 0, GPMC_CD_FCLK);
  287. }
  288. static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
  289. {
  290. unsigned long tick_ps;
  291. /* Calculate in picosecs to yield more exact results */
  292. tick_ps = gpmc_get_fclk_period();
  293. return (time_ps + tick_ps - 1) / tick_ps;
  294. }
  295. unsigned int gpmc_clk_ticks_to_ns(unsigned ticks, int cs,
  296. enum gpmc_clk_domain cd)
  297. {
  298. return ticks * gpmc_get_clk_period(cs, cd) / 1000;
  299. }
  300. unsigned int gpmc_ticks_to_ns(unsigned int ticks)
  301. {
  302. return gpmc_clk_ticks_to_ns(ticks, /* any CS */ 0, GPMC_CD_FCLK);
  303. }
  304. static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
  305. {
  306. return ticks * gpmc_get_fclk_period();
  307. }
  308. static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps)
  309. {
  310. unsigned long ticks = gpmc_ps_to_ticks(time_ps);
  311. return ticks * gpmc_get_fclk_period();
  312. }
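/*
 * Illustrative example (assuming the 10000 ps GPMC_FCLK period above):
 * gpmc_ps_to_ticks(25000) = ceil(25000 / 10000) = 3 ticks, and
 * gpmc_round_ps_to_ticks(25000) = 3 * 10000 = 30000 ps, i.e. requested
 * delays are always rounded up to a whole number of FCLK ticks.
 */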
  313. static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value)
  314. {
  315. u32 l;
  316. l = gpmc_cs_read_reg(cs, reg);
  317. if (value)
  318. l |= mask;
  319. else
  320. l &= ~mask;
  321. gpmc_cs_write_reg(cs, reg, l);
  322. }
  323. static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
  324. {
  325. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1,
  326. GPMC_CONFIG1_TIME_PARA_GRAN,
  327. p->time_para_granularity);
  328. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2,
  329. GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay);
  330. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3,
  331. GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay);
  332. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
  333. GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
  334. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
  335. GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
  336. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
  337. GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
  338. p->cycle2cyclesamecsen);
  339. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
  340. GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN,
  341. p->cycle2cyclediffcsen);
  342. }
  343. #ifdef CONFIG_OMAP_GPMC_DEBUG
  344. /**
  345. * get_gpmc_timing_reg - read a timing parameter and print DTS settings for it.
  346. * @cs: Chip Select Region
  347. * @reg: GPMC_CS_CONFIGn register offset.
  348. * @st_bit: Start Bit
  349. * @end_bit: End Bit. Must be >= @st_bit.
  350. * @max: Maximum parameter value (before optional @shift).
  351. * If 0, maximum is as high as @st_bit and @end_bit allow.
  352. * @name: DTS node name, w/o "gpmc,"
  353. * @cd: Clock Domain of timing parameter.
  354. * @shift: Parameter value left shifts @shift, which is then printed instead of value.
  355. * @raw: Raw Format Option.
  356. * raw format: gpmc,name = <value>
  357. * tick format: gpmc,name = <value> /&zwj;* x ns -- y ns; x ticks *&zwj;/
  358. * Where x ns -- y ns result in the same tick value.
  359. * When @max is exceeded, "invalid" is printed inside comment.
  360. * @noval: Parameter values equal to 0 are not printed.
  361. * @return: Specified timing parameter (after optional @shift).
  362. *
  363. */
  364. static int get_gpmc_timing_reg(
  365. /* timing specifiers */
  366. int cs, int reg, int st_bit, int end_bit, int max,
  367. const char *name, const enum gpmc_clk_domain cd,
  368. /* value transform */
  369. int shift,
  370. /* format specifiers */
  371. bool raw, bool noval)
  372. {
  373. u32 l;
  374. int nr_bits;
  375. int mask;
  376. bool invalid;
  377. l = gpmc_cs_read_reg(cs, reg);
  378. nr_bits = end_bit - st_bit + 1;
  379. mask = (1 << nr_bits) - 1;
  380. l = (l >> st_bit) & mask;
  381. if (!max)
  382. max = mask;
  383. invalid = l > max;
  384. if (shift)
  385. l = (shift << l);
  386. if (noval && (l == 0))
  387. return 0;
  388. if (!raw) {
  389. /* DTS tick format for timings in ns */
  390. unsigned int time_ns;
  391. unsigned int time_ns_min = 0;
  392. if (l)
  393. time_ns_min = gpmc_clk_ticks_to_ns(l - 1, cs, cd) + 1;
  394. time_ns = gpmc_clk_ticks_to_ns(l, cs, cd);
  395. pr_info("gpmc,%s = <%u> /* %u ns - %u ns; %i ticks%s*/\n",
  396. name, time_ns, time_ns_min, time_ns, l,
  397. invalid ? "; invalid " : " ");
  398. } else {
  399. /* raw format */
  400. pr_info("gpmc,%s = <%u>%s\n", name, l,
  401. invalid ? " /* invalid */" : "");
  402. }
  403. return l;
  404. }
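/*
 * Illustrative output of the tick format above (assumed values: 10 ns FCLK,
 * 6 programmed ticks for cs-rd-off):
 *
 * gpmc,cs-rd-off-ns = <60> /&zwj;* 51 ns - 60 ns; 6 ticks *&zwj;/
 *
 * Any request between 51 ns and 60 ns rounds to the same 6-tick setting.
 */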
  405. #define GPMC_PRINT_CONFIG(cs, config) \
  406. pr_info("cs%i %s: 0x%08x\n", cs, #config, \
  407. gpmc_cs_read_reg(cs, config))
  408. #define GPMC_GET_RAW(reg, st, end, field) \
  409. get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 1, 0)
  410. #define GPMC_GET_RAW_MAX(reg, st, end, max, field) \
  411. get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, GPMC_CD_FCLK, 0, 1, 0)
  412. #define GPMC_GET_RAW_BOOL(reg, st, end, field) \
  413. get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 1, 1)
  414. #define GPMC_GET_RAW_SHIFT_MAX(reg, st, end, shift, max, field) \
  415. get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, GPMC_CD_FCLK, (shift), 1, 1)
  416. #define GPMC_GET_TICKS(reg, st, end, field) \
  417. get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, GPMC_CD_FCLK, 0, 0, 0)
  418. #define GPMC_GET_TICKS_CD(reg, st, end, field, cd) \
  419. get_gpmc_timing_reg(cs, (reg), (st), (end), 0, field, (cd), 0, 0, 0)
  420. #define GPMC_GET_TICKS_CD_MAX(reg, st, end, max, field, cd) \
  421. get_gpmc_timing_reg(cs, (reg), (st), (end), (max), field, (cd), 0, 0, 0)
  422. static void gpmc_show_regs(int cs, const char *desc)
  423. {
  424. pr_info("gpmc cs%i %s:\n", cs, desc);
  425. GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG1);
  426. GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG2);
  427. GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG3);
  428. GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG4);
  429. GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG5);
  430. GPMC_PRINT_CONFIG(cs, GPMC_CS_CONFIG6);
  431. }
  432. /*
  433. * Note that gpmc,wait-pin handling wrongly assumes bit 8 is available,
  434. * see commit c9fb809.
  435. */
  436. static void gpmc_cs_show_timings(int cs, const char *desc)
  437. {
  438. gpmc_show_regs(cs, desc);
  439. pr_info("gpmc cs%i access configuration:\n", cs);
  440. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 4, 4, "time-para-granularity");
  441. GPMC_GET_RAW(GPMC_CS_CONFIG1, 8, 9, "mux-add-data");
  442. GPMC_GET_RAW_MAX(GPMC_CS_CONFIG1, 12, 13,
  443. GPMC_CONFIG1_DEVICESIZE_MAX, "device-width");
  444. GPMC_GET_RAW(GPMC_CS_CONFIG1, 16, 17, "wait-pin");
  445. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 21, 21, "wait-on-write");
  446. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 22, 22, "wait-on-read");
  447. GPMC_GET_RAW_SHIFT_MAX(GPMC_CS_CONFIG1, 23, 24, 4,
  448. GPMC_CONFIG1_ATTACHEDDEVICEPAGELENGTH_MAX,
  449. "burst-length");
  450. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 27, 27, "sync-write");
  451. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 28, 28, "burst-write");
  452. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 29, 29, "sync-read");
  453. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 30, 30, "burst-read");
  454. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG1, 31, 31, "burst-wrap");
  455. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG2, 7, 7, "cs-extra-delay");
  456. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG3, 7, 7, "adv-extra-delay");
  457. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 23, 23, "we-extra-delay");
  458. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG4, 7, 7, "oe-extra-delay");
  459. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 7, 7, "cycle2cycle-samecsen");
  460. GPMC_GET_RAW_BOOL(GPMC_CS_CONFIG6, 6, 6, "cycle2cycle-diffcsen");
  461. pr_info("gpmc cs%i timings configuration:\n", cs);
  462. GPMC_GET_TICKS(GPMC_CS_CONFIG2, 0, 3, "cs-on-ns");
  463. GPMC_GET_TICKS(GPMC_CS_CONFIG2, 8, 12, "cs-rd-off-ns");
  464. GPMC_GET_TICKS(GPMC_CS_CONFIG2, 16, 20, "cs-wr-off-ns");
  465. GPMC_GET_TICKS(GPMC_CS_CONFIG3, 0, 3, "adv-on-ns");
  466. GPMC_GET_TICKS(GPMC_CS_CONFIG3, 8, 12, "adv-rd-off-ns");
  467. GPMC_GET_TICKS(GPMC_CS_CONFIG3, 16, 20, "adv-wr-off-ns");
  468. GPMC_GET_TICKS(GPMC_CS_CONFIG4, 0, 3, "oe-on-ns");
  469. GPMC_GET_TICKS(GPMC_CS_CONFIG4, 8, 12, "oe-off-ns");
  470. GPMC_GET_TICKS(GPMC_CS_CONFIG4, 16, 19, "we-on-ns");
  471. GPMC_GET_TICKS(GPMC_CS_CONFIG4, 24, 28, "we-off-ns");
  472. GPMC_GET_TICKS(GPMC_CS_CONFIG5, 0, 4, "rd-cycle-ns");
  473. GPMC_GET_TICKS(GPMC_CS_CONFIG5, 8, 12, "wr-cycle-ns");
  474. GPMC_GET_TICKS(GPMC_CS_CONFIG5, 16, 20, "access-ns");
  475. GPMC_GET_TICKS(GPMC_CS_CONFIG5, 24, 27, "page-burst-access-ns");
  476. GPMC_GET_TICKS(GPMC_CS_CONFIG6, 0, 3, "bus-turnaround-ns");
  477. GPMC_GET_TICKS(GPMC_CS_CONFIG6, 8, 11, "cycle2cycle-delay-ns");
  478. GPMC_GET_TICKS_CD_MAX(GPMC_CS_CONFIG1, 18, 19,
  479. GPMC_CONFIG1_WAITMONITORINGTIME_MAX,
  480. "wait-monitoring-ns", GPMC_CD_CLK);
  481. GPMC_GET_TICKS_CD_MAX(GPMC_CS_CONFIG1, 25, 26,
  482. GPMC_CONFIG1_CLKACTIVATIONTIME_MAX,
  483. "clk-activation-ns", GPMC_CD_FCLK);
  484. GPMC_GET_TICKS(GPMC_CS_CONFIG6, 16, 19, "wr-data-mux-bus-ns");
  485. GPMC_GET_TICKS(GPMC_CS_CONFIG6, 24, 28, "wr-access-ns");
  486. }
  487. #else
  488. static inline void gpmc_cs_show_timings(int cs, const char *desc)
  489. {
  490. }
  491. #endif
  492. /**
  493. * set_gpmc_timing_reg - set a single timing parameter for Chip Select Region.
  494. * Caller is expected to have initialized CONFIG1 GPMCFCLKDIVIDER
  495. * prior to calling this function with @cd equal to GPMC_CD_CLK.
  496. *
  497. * @cs: Chip Select Region.
  498. * @reg: GPMC_CS_CONFIGn register offset.
  499. * @st_bit: Start Bit
  500. * @end_bit: End Bit. Must be >= @st_bit.
  501. * @max: Maximum parameter value.
  502. * If 0, maximum is as high as @st_bit and @end_bit allow.
  503. * @time: Timing parameter in ns.
  504. * @cd: Timing parameter clock domain.
  505. * @name: Timing parameter name.
  506. * @return: 0 on success, -1 on error.
  507. */
  508. static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit, int max,
  509. int time, enum gpmc_clk_domain cd, const char *name)
  510. {
  511. u32 l;
  512. int ticks, mask, nr_bits;
  513. if (time == 0)
  514. ticks = 0;
  515. else
  516. ticks = gpmc_ns_to_clk_ticks(time, cs, cd);
  517. nr_bits = end_bit - st_bit + 1;
  518. mask = (1 << nr_bits) - 1;
  519. if (!max)
  520. max = mask;
  521. if (ticks > max) {
  522. pr_err("%s: GPMC CS%d: %s %d ns, %d ticks > %d ticks\n",
  523. __func__, cs, name, time, ticks, max);
  524. return -1;
  525. }
  526. l = gpmc_cs_read_reg(cs, reg);
  527. #ifdef CONFIG_OMAP_GPMC_DEBUG
  528. pr_info(
  529. "GPMC CS%d: %-17s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
  530. cs, name, ticks, gpmc_get_clk_period(cs, cd) * ticks / 1000,
  531. (l >> st_bit) & mask, time);
  532. #endif
  533. l &= ~(mask << st_bit);
  534. l |= ticks << st_bit;
  535. gpmc_cs_write_reg(cs, reg, l);
  536. return 0;
  537. }
  538. #define GPMC_SET_ONE_CD_MAX(reg, st, end, max, field, cd) \
  539. if (set_gpmc_timing_reg(cs, (reg), (st), (end), (max), \
  540. t->field, (cd), #field) < 0) \
  541. return -1
  542. #define GPMC_SET_ONE(reg, st, end, field) \
  543. GPMC_SET_ONE_CD_MAX(reg, st, end, 0, field, GPMC_CD_FCLK)
  544. /**
  545. * gpmc_calc_waitmonitoring_divider - calculate proper GPMCFCLKDIVIDER based on WAITMONITORINGTIME
  546. * WAITMONITORINGTIME will be _at least_ as long as desired, i.e.
  547. * read --> don't sample bus too early
  548. * write --> data is longer on bus
  549. *
  550. * Formula:
  551. * gpmc_clk_div + 1 = ceil(ceil(waitmonitoringtime_ns / gpmc_fclk_ns)
  552. * / waitmonitoring_ticks)
  553. * WAITMONITORINGTIME resulting in 0 or 1 tick with div = 1 are caught by
  554. * div <= 0 check.
  555. *
  556. * @wait_monitoring: WAITMONITORINGTIME in ns.
  557. * @return: -1 on failure to scale, else proper divider > 0.
  558. */
  559. static int gpmc_calc_waitmonitoring_divider(unsigned int wait_monitoring)
  560. {
  561. int div = gpmc_ns_to_ticks(wait_monitoring);
  562. div += GPMC_CONFIG1_WAITMONITORINGTIME_MAX - 1;
  563. div /= GPMC_CONFIG1_WAITMONITORINGTIME_MAX;
  564. if (div > 4)
  565. return -1;
  566. if (div <= 0)
  567. div = 1;
  568. return div;
  569. }
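/*
 * Illustrative example (assumed 10 ns GPMC_FCLK): a WAITMONITORINGTIME of
 * 55 ns is 6 FCLK ticks; ceil(6 / GPMC_CONFIG1_WAITMONITORINGTIME_MAX) = 3,
 * so GPMC_CLK must run at FCLK / 3. A request that would need a divider
 * greater than 4 cannot be scaled and returns -1.
 */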
  570. /**
  571. * gpmc_calc_divider - calculate GPMC_FCLK divider for sync_clk GPMC_CLK period.
  572. * @sync_clk: GPMC_CLK period in ps.
  573. * @return: Returns at least 1 if GPMC_FCLK can be divided to GPMC_CLK.
  574. * Else, returns -1.
  575. */
  576. int gpmc_calc_divider(unsigned int sync_clk)
  577. {
  578. int div = gpmc_ps_to_ticks(sync_clk);
  579. if (div > 4)
  580. return -1;
  581. if (div <= 0)
  582. div = 1;
  583. return div;
  584. }
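/*
 * Illustrative example (assumed 10000 ps GPMC_FCLK period): a sync_clk of
 * 20000 ps yields gpmc_ps_to_ticks(20000) = 2, i.e. GPMC_CLK = GPMC_FCLK / 2,
 * while 50000 ps would need a divider of 5 and therefore returns -1.
 */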
  585. /**
  586. * gpmc_cs_set_timings - program timing parameters for Chip Select Region.
  587. * @cs: Chip Select Region.
  588. * @t: GPMC timing parameters.
  589. * @s: GPMC timing settings.
  590. * @return: 0 on success, -1 on error.
  591. */
  592. int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t,
  593. const struct gpmc_settings *s)
  594. {
  595. int div;
  596. u32 l;
  597. gpmc_cs_show_timings(cs, "before gpmc_cs_set_timings");
  598. div = gpmc_calc_divider(t->sync_clk);
  599. if (div < 0)
  600. return div;
  601. /*
  602. * See if we need to change the divider for waitmonitoringtime.
  603. *
  604. * Calculate GPMCFCLKDIVIDER independent of gpmc,sync-clk-ps in DT for
  605. * pure asynchronous accesses, i.e. both read and write asynchronous.
  606. * However, only do so if WAITMONITORINGTIME is actually used, i.e.
  607. * either WAITREADMONITORING or WAITWRITEMONITORING is set.
  608. *
  609. * This statement must not change div to scale async WAITMONITORINGTIME
  610. * to protect mixed synchronous and asynchronous accesses.
  611. *
  612. * We raise an error later if WAITMONITORINGTIME does not fit.
  613. */
  614. if (!s->sync_read && !s->sync_write &&
  615. (s->wait_on_read || s->wait_on_write)
  616. ) {
  617. div = gpmc_calc_waitmonitoring_divider(t->wait_monitoring);
  618. if (div < 0) {
  619. pr_err("%s: waitmonitoringtime %3d ns too large for greatest gpmcfclkdivider.\n",
  620. __func__,
  621. t->wait_monitoring
  622. );
  623. return -1;
  624. }
  625. }
  626. GPMC_SET_ONE(GPMC_CS_CONFIG2, 0, 3, cs_on);
  627. GPMC_SET_ONE(GPMC_CS_CONFIG2, 8, 12, cs_rd_off);
  628. GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);
  629. GPMC_SET_ONE(GPMC_CS_CONFIG3, 0, 3, adv_on);
  630. GPMC_SET_ONE(GPMC_CS_CONFIG3, 8, 12, adv_rd_off);
  631. GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);
  632. GPMC_SET_ONE(GPMC_CS_CONFIG4, 0, 3, oe_on);
  633. GPMC_SET_ONE(GPMC_CS_CONFIG4, 8, 12, oe_off);
  634. GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
  635. GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);
  636. GPMC_SET_ONE(GPMC_CS_CONFIG5, 0, 4, rd_cycle);
  637. GPMC_SET_ONE(GPMC_CS_CONFIG5, 8, 12, wr_cycle);
  638. GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);
  639. GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);
  640. GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround);
  641. GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay);
  642. if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
  643. GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
  644. if (gpmc_capability & GPMC_HAS_WR_ACCESS)
  645. GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);
  646. l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
  647. l &= ~0x03;
  648. l |= (div - 1);
  649. gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
  650. GPMC_SET_ONE_CD_MAX(GPMC_CS_CONFIG1, 18, 19,
  651. GPMC_CONFIG1_WAITMONITORINGTIME_MAX,
  652. wait_monitoring, GPMC_CD_CLK);
  653. GPMC_SET_ONE_CD_MAX(GPMC_CS_CONFIG1, 25, 26,
  654. GPMC_CONFIG1_CLKACTIVATIONTIME_MAX,
  655. clk_activation, GPMC_CD_FCLK);
  656. #ifdef CONFIG_OMAP_GPMC_DEBUG
  657. pr_info("GPMC CS%d CLK period is %lu ns (div %d)\n",
  658. cs, (div * gpmc_get_fclk_period()) / 1000, div);
  659. #endif
  660. gpmc_cs_bool_timings(cs, &t->bool_timings);
  661. gpmc_cs_show_timings(cs, "after gpmc_cs_set_timings");
  662. return 0;
  663. }
  664. static int gpmc_cs_set_memconf(int cs, u32 base, u32 size)
  665. {
  666. u32 l;
  667. u32 mask;
  668. /*
  669. * Ensure that base address is aligned on a
  670. * boundary equal to or greater than size.
  671. */
  672. if (base & (size - 1))
  673. return -EINVAL;
  674. base >>= GPMC_CHUNK_SHIFT;
  675. mask = (1 << GPMC_SECTION_SHIFT) - size;
  676. mask >>= GPMC_CHUNK_SHIFT;
  677. mask <<= GPMC_CONFIG7_MASKADDRESS_OFFSET;
  678. l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
  679. l &= ~GPMC_CONFIG7_MASK;
  680. l |= base & GPMC_CONFIG7_BASEADDRESS_MASK;
  681. l |= mask & GPMC_CONFIG7_MASKADDRESS_MASK;
  682. l |= GPMC_CONFIG7_CSVALID;
  683. gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
  684. return 0;
  685. }
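/*
 * Illustrative CONFIG7 encoding (assumed values, not from the original
 * source): mapping a 16 MB region at 0x08000000 gives a base field of 0x08,
 * a mask of ((1 << GPMC_SECTION_SHIFT) - SZ_16M) >> GPMC_CHUNK_SHIFT = 0x0f,
 * and a final register value of 0x08 | (0x0f << 8) | GPMC_CONFIG7_CSVALID
 * = 0x0f48.
 */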
  686. static void gpmc_cs_enable_mem(int cs)
  687. {
  688. u32 l;
  689. l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
  690. l |= GPMC_CONFIG7_CSVALID;
  691. gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
  692. }
  693. static void gpmc_cs_disable_mem(int cs)
  694. {
  695. u32 l;
  696. l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
  697. l &= ~GPMC_CONFIG7_CSVALID;
  698. gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
  699. }
  700. static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
  701. {
  702. u32 l;
  703. u32 mask;
  704. l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
  705. *base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
  706. mask = (l >> 8) & 0x0f;
  707. *size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
  708. }
  709. static int gpmc_cs_mem_enabled(int cs)
  710. {
  711. u32 l;
  712. l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
  713. return l & GPMC_CONFIG7_CSVALID;
  714. }
  715. static void gpmc_cs_set_reserved(int cs, int reserved)
  716. {
  717. struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
  718. if (reserved)
  gpmc->flags |= GPMC_CS_RESERVED;
  else
  gpmc->flags &= ~GPMC_CS_RESERVED;
  719. }
  720. static bool gpmc_cs_reserved(int cs)
  721. {
  722. struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
  723. return gpmc->flags & GPMC_CS_RESERVED;
  724. }
  725. static void gpmc_cs_set_name(int cs, const char *name)
  726. {
  727. struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
  728. gpmc->name = name;
  729. }
  730. static const char *gpmc_cs_get_name(int cs)
  731. {
  732. struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
  733. return gpmc->name;
  734. }
  735. static unsigned long gpmc_mem_align(unsigned long size)
  736. {
  737. int order;
  738. size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
  739. order = GPMC_CHUNK_SHIFT - 1;
  740. do {
  741. size >>= 1;
  742. order++;
  743. } while (size);
  744. size = 1 << order;
  745. return size;
  746. }
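/*
 * Illustrative example: gpmc_mem_align() rounds a request up to the next
 * power of two, with a floor of one 16 MB chunk, e.g. (assumed sizes)
 * 1 MB -> 16 MB and 20 MB -> 32 MB.
 */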
  747. static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
  748. {
  749. struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
  750. struct resource *res = &gpmc->mem;
  751. int r;
  752. size = gpmc_mem_align(size);
  753. spin_lock(&gpmc_mem_lock);
  754. res->start = base;
  755. res->end = base + size - 1;
  756. r = request_resource(&gpmc_mem_root, res);
  757. spin_unlock(&gpmc_mem_lock);
  758. return r;
  759. }
  760. static int gpmc_cs_delete_mem(int cs)
  761. {
  762. struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
  763. struct resource *res = &gpmc->mem;
  764. int r;
  765. spin_lock(&gpmc_mem_lock);
  766. r = release_resource(res);
  767. res->start = 0;
  768. res->end = 0;
  769. spin_unlock(&gpmc_mem_lock);
  770. return r;
  771. }
  772. /**
  773. * gpmc_cs_remap - remaps a chip-select physical base address
  774. * @cs: chip-select to remap
  775. * @base: physical base address to re-map chip-select to
  776. *
  777. * Re-maps a chip-select to a new physical base address specified by
  778. * "base". Returns 0 on success and appropriate negative error code
  779. * on failure.
  780. */
  781. static int gpmc_cs_remap(int cs, u32 base)
  782. {
  783. int ret;
  784. u32 old_base, size;
  785. if (cs >= gpmc_cs_num) {
  786. pr_err("%s: requested chip-select is disabled\n", __func__);
  787. return -ENODEV;
  788. }
  789. /*
  790. * Make sure we ignore any device offsets from the GPMC partition
  791. * allocated for the chip select and that the new base conforms
  792. * to the GPMC 16MB minimum granularity.
  793. */
  794. base &= ~(SZ_16M - 1);
  795. gpmc_cs_get_memconf(cs, &old_base, &size);
  796. if (base == old_base)
  797. return 0;
  798. ret = gpmc_cs_delete_mem(cs);
  799. if (ret < 0)
  800. return ret;
  801. ret = gpmc_cs_insert_mem(cs, base, size);
  802. if (ret < 0)
  803. return ret;
  804. ret = gpmc_cs_set_memconf(cs, base, size);
  805. return ret;
  806. }
  807. int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
  808. {
  809. struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
  810. struct resource *res = &gpmc->mem;
  811. int r = -1;
  812. if (cs >= gpmc_cs_num) {
  813. pr_err("%s: requested chip-select is disabled\n", __func__);
  814. return -ENODEV;
  815. }
  816. size = gpmc_mem_align(size);
  817. if (size > (1 << GPMC_SECTION_SHIFT))
  818. return -ENOMEM;
  819. spin_lock(&gpmc_mem_lock);
  820. if (gpmc_cs_reserved(cs)) {
  821. r = -EBUSY;
  822. goto out;
  823. }
  824. if (gpmc_cs_mem_enabled(cs))
  825. r = adjust_resource(res, res->start & ~(size - 1), size);
  826. if (r < 0)
  827. r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
  828. size, NULL, NULL);
  829. if (r < 0)
  830. goto out;
  831. /* Disable CS while changing base address and size mask */
  832. gpmc_cs_disable_mem(cs);
  833. r = gpmc_cs_set_memconf(cs, res->start, resource_size(res));
  834. if (r < 0) {
  835. release_resource(res);
  836. goto out;
  837. }
  838. /* Enable CS */
  839. gpmc_cs_enable_mem(cs);
  840. *base = res->start;
  841. gpmc_cs_set_reserved(cs, 1);
  842. out:
  843. spin_unlock(&gpmc_mem_lock);
  844. return r;
  845. }
  846. EXPORT_SYMBOL(gpmc_cs_request);
  847. void gpmc_cs_free(int cs)
  848. {
  849. struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
  850. struct resource *res = &gpmc->mem;
  851. spin_lock(&gpmc_mem_lock);
  852. if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
  853. printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
  854. BUG();
  855. spin_unlock(&gpmc_mem_lock);
  856. return;
  857. }
  858. gpmc_cs_disable_mem(cs);
  859. if (res->flags)
  860. release_resource(res);
  861. gpmc_cs_set_reserved(cs, 0);
  862. spin_unlock(&gpmc_mem_lock);
  863. }
  864. EXPORT_SYMBOL(gpmc_cs_free);
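/*
 * Illustrative usage from a client driver (a sketch; the chip-select number
 * and size are assumptions, not taken from this file):
 *
 *	unsigned long cs_base;
 *	int ret = gpmc_cs_request(0, SZ_16M, &cs_base);
 *
 *	if (ret < 0)
 *		return ret;
 *	/&zwj;* ... use the window at cs_base ... *&zwj;/
 *	gpmc_cs_free(0);
 */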
  865. /**
  866. * gpmc_configure - write request to configure gpmc
  867. * @cmd: command type
  868. * @wval: value to write
  869. * @return status of the operation
  870. */
  871. int gpmc_configure(int cmd, int wval)
  872. {
  873. u32 regval;
  874. switch (cmd) {
  875. case GPMC_ENABLE_IRQ:
  876. gpmc_write_reg(GPMC_IRQENABLE, wval);
  877. break;
  878. case GPMC_SET_IRQ_STATUS:
  879. gpmc_write_reg(GPMC_IRQSTATUS, wval);
  880. break;
  881. case GPMC_CONFIG_WP:
  882. regval = gpmc_read_reg(GPMC_CONFIG);
  883. if (wval)
  884. regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
  885. else
  886. regval |= GPMC_CONFIG_WRITEPROTECT; /* WP is OFF */
  887. gpmc_write_reg(GPMC_CONFIG, regval);
  888. break;
  889. default:
  890. pr_err("%s: command not supported\n", __func__);
  891. return -EINVAL;
  892. }
  893. return 0;
  894. }
  895. EXPORT_SYMBOL(gpmc_configure);
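/*
 * Illustrative usage (sketch): a NAND driver can toggle the write-protect
 * output with
 *
 *	gpmc_configure(GPMC_CONFIG_WP, 0);	/&zwj;* allow writes (WP off) *&zwj;/
 *	gpmc_configure(GPMC_CONFIG_WP, 1);	/&zwj;* protect again (WP on) *&zwj;/
 *
 * Note the inverted sense above: a non-zero @wval clears the WRITEPROTECT
 * bit in GPMC_CONFIG, which asserts write protection.
 */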
  896. void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
  897. {
  898. int i;
  899. reg->gpmc_status = gpmc_base + GPMC_STATUS;
  900. reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
  901. GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
  902. reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
  903. GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs;
  904. reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET +
  905. GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs;
  906. reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1;
  907. reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2;
  908. reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL;
  909. reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS;
  910. reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG;
  911. reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL;
  912. reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG;
  913. reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT;
  914. for (i = 0; i < GPMC_BCH_NUM_REMAINDER; i++) {
  915. reg->gpmc_bch_result0[i] = gpmc_base + GPMC_ECC_BCH_RESULT_0 +
  916. GPMC_BCH_SIZE * i;
  917. reg->gpmc_bch_result1[i] = gpmc_base + GPMC_ECC_BCH_RESULT_1 +
  918. GPMC_BCH_SIZE * i;
  919. reg->gpmc_bch_result2[i] = gpmc_base + GPMC_ECC_BCH_RESULT_2 +
  920. GPMC_BCH_SIZE * i;
  921. reg->gpmc_bch_result3[i] = gpmc_base + GPMC_ECC_BCH_RESULT_3 +
  922. GPMC_BCH_SIZE * i;
  923. reg->gpmc_bch_result4[i] = gpmc_base + GPMC_ECC_BCH_RESULT_4 +
  924. i * GPMC_BCH_SIZE;
  925. reg->gpmc_bch_result5[i] = gpmc_base + GPMC_ECC_BCH_RESULT_5 +
  926. i * GPMC_BCH_SIZE;
  927. reg->gpmc_bch_result6[i] = gpmc_base + GPMC_ECC_BCH_RESULT_6 +
  928. i * GPMC_BCH_SIZE;
  929. }
  930. }
  931. int gpmc_get_client_irq(unsigned irq_config)
  932. {
  933. int i;
  934. if (hweight32(irq_config) > 1)
  935. return 0;
  936. for (i = 0; i < GPMC_NR_IRQ; i++)
  937. if (gpmc_client_irq[i].bitmask & irq_config)
  938. return gpmc_client_irq[i].irq;
  939. return 0;
  940. }
  941. static int gpmc_irq_endis(unsigned irq, bool endis)
  942. {
  943. int i;
  944. u32 regval;
  945. for (i = 0; i < GPMC_NR_IRQ; i++)
  946. if (irq == gpmc_client_irq[i].irq) {
  947. regval = gpmc_read_reg(GPMC_IRQENABLE);
  948. if (endis)
  949. regval |= gpmc_client_irq[i].bitmask;
  950. else
  951. regval &= ~gpmc_client_irq[i].bitmask;
  952. gpmc_write_reg(GPMC_IRQENABLE, regval);
  953. break;
  954. }
  955. return 0;
  956. }
  957. static void gpmc_irq_disable(struct irq_data *p)
  958. {
  959. gpmc_irq_endis(p->irq, false);
  960. }
  961. static void gpmc_irq_enable(struct irq_data *p)
  962. {
  963. gpmc_irq_endis(p->irq, true);
  964. }
  965. static void gpmc_irq_noop(struct irq_data *data) { }
  966. static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
  967. static int gpmc_setup_irq(void)
  968. {
  969. int i;
  970. u32 regval;
  971. if (!gpmc_irq)
  972. return -EINVAL;
  973. gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
  974. if (gpmc_irq_start < 0) {
  975. pr_err("irq_alloc_descs failed\n");
  976. return gpmc_irq_start;
  977. }
  978. gpmc_irq_chip.name = "gpmc";
  979. gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
  980. gpmc_irq_chip.irq_enable = gpmc_irq_enable;
  981. gpmc_irq_chip.irq_disable = gpmc_irq_disable;
  982. gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
  983. gpmc_irq_chip.irq_ack = gpmc_irq_noop;
  984. gpmc_irq_chip.irq_mask = gpmc_irq_noop;
  985. gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
  986. gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
  987. gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
  988. for (i = 0; i < GPMC_NR_IRQ; i++) {
  989. gpmc_client_irq[i].irq = gpmc_irq_start + i;
  990. irq_set_chip_and_handler(gpmc_client_irq[i].irq,
  991. &gpmc_irq_chip, handle_simple_irq);
  992. set_irq_flags(gpmc_client_irq[i].irq,
  993. IRQF_VALID | IRQF_NOAUTOEN);
  994. }
  995. /* Disable interrupts */
  996. gpmc_write_reg(GPMC_IRQENABLE, 0);
  997. /* clear interrupts */
  998. regval = gpmc_read_reg(GPMC_IRQSTATUS);
  999. gpmc_write_reg(GPMC_IRQSTATUS, regval);
  1000. return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL);
  1001. }
  1002. static int gpmc_free_irq(void)
  1003. {
  1004. int i;
  1005. if (gpmc_irq)
  1006. free_irq(gpmc_irq, NULL);
  1007. for (i = 0; i < GPMC_NR_IRQ; i++) {
  1008. irq_set_handler(gpmc_client_irq[i].irq, NULL);
  1009. irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip);
  1010. irq_modify_status(gpmc_client_irq[i].irq, 0, 0);
  1011. }
  1012. irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ);
  1013. return 0;
  1014. }
  1015. static void gpmc_mem_exit(void)
  1016. {
  1017. int cs;
  1018. for (cs = 0; cs < gpmc_cs_num; cs++) {
  1019. if (!gpmc_cs_mem_enabled(cs))
  1020. continue;
  1021. gpmc_cs_delete_mem(cs);
  1022. }
  1023. }
  1024. static void gpmc_mem_init(void)
  1025. {
  1026. int cs;
  1027. /*
  1028. * The first 1MB of GPMC address space is typically mapped to
  1029. * the internal ROM. Never allocate the first page, to
  1030. * facilitate bug detection; even if we didn't boot from ROM.
  1031. */
  1032. gpmc_mem_root.start = SZ_1M;
  1033. gpmc_mem_root.end = GPMC_MEM_END;
  1034. /* Reserve all regions that has been set up by bootloader */
  1035. for (cs = 0; cs < gpmc_cs_num; cs++) {
  1036. u32 base, size;
  1037. if (!gpmc_cs_mem_enabled(cs))
  1038. continue;
  1039. gpmc_cs_get_memconf(cs, &base, &size);
  1040. if (gpmc_cs_insert_mem(cs, base, size)) {
  1041. pr_warn("%s: disabling cs %d mapped at 0x%x-0x%x\n",
  1042. __func__, cs, base, base + size);
  1043. gpmc_cs_disable_mem(cs);
  1044. }
  1045. }
  1046. }
  1047. static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk)
  1048. {
  1049. u32 temp;
  1050. int div;
  1051. div = gpmc_calc_divider(sync_clk);
  1052. temp = gpmc_ps_to_ticks(time_ps);
  1053. temp = (temp + div - 1) / div;
  1054. return gpmc_ticks_to_ps(temp * div);
  1055. }
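/*
 * Illustrative example (assumed 10000 ps FCLK, sync_clk = 20000 ps, i.e. a
 * divider of 2): 45000 ps becomes 5 FCLK ticks, which is rounded up to the
 * next whole GPMC_CLK boundary: 3 CLK cycles = 6 FCLK ticks = 60000 ps.
 */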
  1056. /* XXX: can the cycles be avoided ? */
  1057. static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t,
  1058. struct gpmc_device_timings *dev_t,
  1059. bool mux)
  1060. {
  1061. u32 temp;
  1062. /* adv_rd_off */
  1063. temp = dev_t->t_avdp_r;
  1064. /* XXX: mux check required ? */
  1065. if (mux) {
  1066. /* XXX: t_avdp not to be required for sync, only added for tusb
  1067. * this indirectly necessitates requirement of t_avdp_r and
  1068. * t_avdp_w instead of having a single t_avdp
  1069. */
  1070. temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh);
  1071. temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
  1072. }
  1073. gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
  1074. /* oe_on */
  1075. temp = dev_t->t_oeasu; /* XXX: remove this ? */
  1076. if (mux) {
  1077. temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach);
  1078. temp = max_t(u32, temp, gpmc_t->adv_rd_off +
  1079. gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe));
  1080. }
  1081. gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
  1082. /* access */
  1083. /* XXX: any scope for improvement ?, by combining oe_on
  1084. * and clk_activation, need to check whether
  1085. * access = clk_activation + round to sync clk ?
  1086. */
  1087. temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk);
  1088. temp += gpmc_t->clk_activation;
  1089. if (dev_t->cyc_oe)
  1090. temp = max_t(u32, temp, gpmc_t->oe_on +
  1091. gpmc_ticks_to_ps(dev_t->cyc_oe));
  1092. gpmc_t->access = gpmc_round_ps_to_ticks(temp);
  1093. gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
  1094. gpmc_t->cs_rd_off = gpmc_t->oe_off;
  1095. /* rd_cycle */
  1096. temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez);
  1097. temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) +
  1098. gpmc_t->access;
  1099. /* XXX: barter t_ce_rdyz with t_cez_r ? */
  1100. if (dev_t->t_ce_rdyz)
  1101. temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz);
  1102. gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);
  1103. return 0;
  1104. }
  1105. static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t,
  1106. struct gpmc_device_timings *dev_t,
  1107. bool mux)
  1108. {
  1109. u32 temp;
  1110. /* adv_wr_off */
  1111. temp = dev_t->t_avdp_w;
  1112. if (mux) {
  1113. temp = max_t(u32, temp,
  1114. gpmc_t->clk_activation + dev_t->t_avdh);
  1115. temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
  1116. }
  1117. gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
  1118. /* wr_data_mux_bus */
  1119. temp = max_t(u32, dev_t->t_weasu,
  1120. gpmc_t->clk_activation + dev_t->t_rdyo);
  1121. /* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?,
  1122. * and in that case remember to handle we_on properly
  1123. */
  1124. if (mux) {
  1125. temp = max_t(u32, temp,
  1126. gpmc_t->adv_wr_off + dev_t->t_aavdh);
  1127. temp = max_t(u32, temp, gpmc_t->adv_wr_off +
  1128. gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
  1129. }
  1130. gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
  1131. /* we_on */
  1132. if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
  1133. gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
  1134. else
  1135. gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
  1136. /* wr_access */
  1137. /* XXX: gpmc_capability check reqd ? , even if not, will not harm */
  1138. gpmc_t->wr_access = gpmc_t->access;
  1139. /* we_off */
  1140. temp = gpmc_t->we_on + dev_t->t_wpl;
  1141. temp = max_t(u32, temp,
  1142. gpmc_t->wr_access + gpmc_ticks_to_ps(1));
  1143. temp = max_t(u32, temp,
  1144. gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl));
  1145. gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
  1146. gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
  1147. dev_t->t_wph);
  1148. /* wr_cycle */
  1149. temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk);
  1150. temp += gpmc_t->wr_access;
  1151. /* XXX: barter t_ce_rdyz with t_cez_w ? */
  1152. if (dev_t->t_ce_rdyz)
  1153. temp = max_t(u32, temp,
  1154. gpmc_t->cs_wr_off + dev_t->t_ce_rdyz);
  1155. gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
  1156. return 0;
  1157. }
  1158. static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
  1159. struct gpmc_device_timings *dev_t,
  1160. bool mux)
  1161. {
  1162. u32 temp;
  1163. /* adv_rd_off */
  1164. temp = dev_t->t_avdp_r;
  1165. if (mux)
  1166. temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
  1167. gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);
  1168. /* oe_on */
  1169. temp = dev_t->t_oeasu;
  1170. if (mux)
  1171. temp = max_t(u32, temp,
  1172. gpmc_t->adv_rd_off + dev_t->t_aavdh);
  1173. gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);
  1174. /* access */
        temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
                     gpmc_t->oe_on + dev_t->t_oe);
        temp = max_t(u32, temp,
                     gpmc_t->cs_on + dev_t->t_ce);
        temp = max_t(u32, temp,
                     gpmc_t->adv_on + dev_t->t_aa);
        gpmc_t->access = gpmc_round_ps_to_ticks(temp);

        gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
        gpmc_t->cs_rd_off = gpmc_t->oe_off;

        /* rd_cycle */
        temp = max_t(u32, dev_t->t_rd_cycle,
                     gpmc_t->cs_rd_off + dev_t->t_cez_r);
        temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez);
        gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);

        return 0;
}

static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t,
                                         struct gpmc_device_timings *dev_t,
                                         bool mux)
{
        u32 temp;

        /* adv_wr_off */
        temp = dev_t->t_avdp_w;
        if (mux)
                temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
        gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);

        /* wr_data_mux_bus */
        temp = dev_t->t_weasu;
        if (mux) {
                temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh);
                temp = max_t(u32, temp, gpmc_t->adv_wr_off +
                             gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
        }
        gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);

        /* we_on */
        if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
                gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
        else
                gpmc_t->we_on = gpmc_t->wr_data_mux_bus;

        /* we_off */
        temp = gpmc_t->we_on + dev_t->t_wpl;
        gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);

        gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
                                                   dev_t->t_wph);

        /* wr_cycle */
        temp = max_t(u32, dev_t->t_wr_cycle,
                     gpmc_t->cs_wr_off + dev_t->t_cez_w);
        gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);

        return 0;
}

static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t,
                                         struct gpmc_device_timings *dev_t)
{
        u32 temp;

        gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) *
                           gpmc_get_fclk_period();

        gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk(
                                        dev_t->t_bacc,
                                        gpmc_t->sync_clk);

        temp = max_t(u32, dev_t->t_ces, dev_t->t_avds);
        gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp);

        if (gpmc_calc_divider(gpmc_t->sync_clk) != 1)
                return 0;

        if (dev_t->ce_xdelay)
                gpmc_t->bool_timings.cs_extra_delay = true;
        if (dev_t->avd_xdelay)
                gpmc_t->bool_timings.adv_extra_delay = true;
        if (dev_t->oe_xdelay)
                gpmc_t->bool_timings.oe_extra_delay = true;
        if (dev_t->we_xdelay)
                gpmc_t->bool_timings.we_extra_delay = true;

        return 0;
}

static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
                                    struct gpmc_device_timings *dev_t,
                                    bool sync)
{
        u32 temp;

        /* cs_on */
        gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu);

        /* adv_on */
        temp = dev_t->t_avdasu;
        if (dev_t->t_ce_avd)
                temp = max_t(u32, temp,
                             gpmc_t->cs_on + dev_t->t_ce_avd);
        gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp);

        if (sync)
                gpmc_calc_sync_common_timings(gpmc_t, dev_t);

        return 0;
}

/* TODO: remove this function once all peripherals are confirmed to
 * work with generic timing. Simultaneously gpmc_cs_set_timings()
 * has to be modified to handle timings in ps instead of ns
 */
static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
{
        t->cs_on /= 1000;
        t->cs_rd_off /= 1000;
        t->cs_wr_off /= 1000;
        t->adv_on /= 1000;
        t->adv_rd_off /= 1000;
        t->adv_wr_off /= 1000;
        t->we_on /= 1000;
        t->we_off /= 1000;
        t->oe_on /= 1000;
        t->oe_off /= 1000;
        t->page_burst_access /= 1000;
        t->access /= 1000;
        t->rd_cycle /= 1000;
        t->wr_cycle /= 1000;
        t->bus_turnaround /= 1000;
        t->cycle2cycle_delay /= 1000;
        t->wait_monitoring /= 1000;
        t->clk_activation /= 1000;
        t->wr_access /= 1000;
        t->wr_data_mux_bus /= 1000;
}

int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
                      struct gpmc_settings *gpmc_s,
                      struct gpmc_device_timings *dev_t)
{
        bool mux = false, sync = false;

        if (gpmc_s) {
                mux = gpmc_s->mux_add_data ? true : false;
                sync = (gpmc_s->sync_read || gpmc_s->sync_write);
        }

        memset(gpmc_t, 0, sizeof(*gpmc_t));

        gpmc_calc_common_timings(gpmc_t, dev_t, sync);

        if (gpmc_s && gpmc_s->sync_read)
                gpmc_calc_sync_read_timings(gpmc_t, dev_t, mux);
        else
                gpmc_calc_async_read_timings(gpmc_t, dev_t, mux);

        if (gpmc_s && gpmc_s->sync_write)
                gpmc_calc_sync_write_timings(gpmc_t, dev_t, mux);
        else
                gpmc_calc_async_write_timings(gpmc_t, dev_t, mux);

        /* TODO: remove, see function definition */
        gpmc_convert_ps_to_ns(gpmc_t);

        return 0;
}
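
/*
 * Illustrative sketch only, not part of the driver: a peripheral setup path
 * would typically fill a struct gpmc_device_timings from the attached
 * device's datasheet (values in picoseconds, see the TODO above), let
 * gpmc_calc_timings() derive the register-level timings, and then program
 * the chip-select. The field values below are placeholders, not
 * recommendations for any real device:
 *
 *      struct gpmc_device_timings dev_t = { .t_oe = 45000, .t_wpl = 40000 };
 *      struct gpmc_settings s = { .device_width = GPMC_DEVWIDTH_16BIT };
 *      struct gpmc_timings t;
 *
 *      gpmc_calc_timings(&t, &s, &dev_t);
 *      gpmc_cs_program_settings(cs, &s);
 *      gpmc_cs_set_timings(cs, &t, &s);
 */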
/**
 * gpmc_cs_program_settings - programs non-timing related settings
 * @cs: GPMC chip-select to program
 * @p: pointer to GPMC settings structure
 *
 * Programs non-timing related settings for a GPMC chip-select, such as
 * bus-width, burst configuration, etc. Function should be called once
 * for each chip-select that is being used and must be called before
 * calling gpmc_cs_set_timings() as timing parameters in the CONFIG1
 * register will be initialised to zero by this function. Returns 0 on
 * success and appropriate negative error code on failure.
 */
int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
{
        u32 config1;

        if ((!p->device_width) || (p->device_width > GPMC_DEVWIDTH_16BIT)) {
                pr_err("%s: invalid width %d!", __func__, p->device_width);
                return -EINVAL;
        }

        /* Address-data multiplexing not supported for NAND devices */
        if (p->device_nand && p->mux_add_data) {
                pr_err("%s: invalid configuration!\n", __func__);
                return -EINVAL;
        }

        if ((p->mux_add_data > GPMC_MUX_AD) ||
            ((p->mux_add_data == GPMC_MUX_AAD) &&
             !(gpmc_capability & GPMC_HAS_MUX_AAD))) {
                pr_err("%s: invalid multiplex configuration!\n", __func__);
                return -EINVAL;
        }

        /* Page/burst mode supports lengths of 4, 8 and 16 bytes */
        if (p->burst_read || p->burst_write) {
                switch (p->burst_len) {
                case GPMC_BURST_4:
                case GPMC_BURST_8:
                case GPMC_BURST_16:
                        break;
                default:
                        pr_err("%s: invalid page/burst-length (%d)\n",
                               __func__, p->burst_len);
                        return -EINVAL;
                }
        }

        if (p->wait_pin > gpmc_nr_waitpins) {
                pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
                return -EINVAL;
        }

        config1 = GPMC_CONFIG1_DEVICESIZE((p->device_width - 1));

        if (p->sync_read)
                config1 |= GPMC_CONFIG1_READTYPE_SYNC;
        if (p->sync_write)
                config1 |= GPMC_CONFIG1_WRITETYPE_SYNC;
        if (p->wait_on_read)
                config1 |= GPMC_CONFIG1_WAIT_READ_MON;
        if (p->wait_on_write)
                config1 |= GPMC_CONFIG1_WAIT_WRITE_MON;
        if (p->wait_on_read || p->wait_on_write)
                config1 |= GPMC_CONFIG1_WAIT_PIN_SEL(p->wait_pin);
        if (p->device_nand)
                config1 |= GPMC_CONFIG1_DEVICETYPE(GPMC_DEVICETYPE_NAND);
        if (p->mux_add_data)
                config1 |= GPMC_CONFIG1_MUXTYPE(p->mux_add_data);
        if (p->burst_read)
                config1 |= GPMC_CONFIG1_READMULTIPLE_SUPP;
        if (p->burst_write)
                config1 |= GPMC_CONFIG1_WRITEMULTIPLE_SUPP;
        if (p->burst_read || p->burst_write) {
                config1 |= GPMC_CONFIG1_PAGE_LEN(p->burst_len >> 3);
                config1 |= p->burst_wrap ? GPMC_CONFIG1_WRAPBURST_SUPP : 0;
        }

        gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, config1);

        return 0;
}
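
/*
 * Minimal usage sketch (illustrative only, not driver code). A caller that
 * already owns chip-select "cs" might program a simple 16-bit device like
 * this; the settings shown are placeholders:
 *
 *      struct gpmc_settings s = { .device_width = GPMC_DEVWIDTH_16BIT };
 *
 *      if (gpmc_cs_program_settings(cs, &s))
 *              pr_err("programming GPMC CS settings failed\n");
 *      else
 *              gpmc_cs_set_timings(cs, &t, &s);
 *
 * Programming the settings first matters because this function rewrites
 * CONFIG1 and clears its timing-related fields, as the kernel-doc notes.
 */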
#ifdef CONFIG_OF
static const struct of_device_id gpmc_dt_ids[] = {
        { .compatible = "ti,omap2420-gpmc" },
        { .compatible = "ti,omap2430-gpmc" },
        { .compatible = "ti,omap3430-gpmc" },   /* omap3430 & omap3630 */
        { .compatible = "ti,omap4430-gpmc" },   /* omap4430 & omap4460 & omap543x */
        { .compatible = "ti,am3352-gpmc" },     /* am335x devices */
        { }
};
MODULE_DEVICE_TABLE(of, gpmc_dt_ids);

/**
 * gpmc_read_settings_dt - read gpmc settings from device-tree
 * @np: pointer to device-tree node for a gpmc child device
 * @p: pointer to gpmc settings structure
 *
 * Reads the GPMC settings for a GPMC child device from device-tree and
 * stores them in the GPMC settings structure passed. The GPMC settings
 * structure is initialised to zero by this function and so any
 * previously stored settings will be cleared.
 */
void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p)
{
        memset(p, 0, sizeof(struct gpmc_settings));

        p->sync_read = of_property_read_bool(np, "gpmc,sync-read");
        p->sync_write = of_property_read_bool(np, "gpmc,sync-write");
        of_property_read_u32(np, "gpmc,device-width", &p->device_width);
        of_property_read_u32(np, "gpmc,mux-add-data", &p->mux_add_data);

        if (!of_property_read_u32(np, "gpmc,burst-length", &p->burst_len)) {
                p->burst_wrap = of_property_read_bool(np, "gpmc,burst-wrap");
                p->burst_read = of_property_read_bool(np, "gpmc,burst-read");
                p->burst_write = of_property_read_bool(np, "gpmc,burst-write");
                if (!p->burst_read && !p->burst_write)
                        pr_warn("%s: page/burst-length set but not used!\n",
                                __func__);
        }

        if (!of_property_read_u32(np, "gpmc,wait-pin", &p->wait_pin)) {
                p->wait_on_read = of_property_read_bool(np,
                                                        "gpmc,wait-on-read");
                p->wait_on_write = of_property_read_bool(np,
                                                         "gpmc,wait-on-write");
                if (!p->wait_on_read && !p->wait_on_write)
                        pr_debug("%s: rd/wr wait monitoring not enabled!\n",
                                 __func__);
        }
}

static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
                                                struct gpmc_timings *gpmc_t)
{
        struct gpmc_bool_timings *p;

        if (!np || !gpmc_t)
                return;

        memset(gpmc_t, 0, sizeof(*gpmc_t));
        /* minimum clock period for synchronous mode */
        of_property_read_u32(np, "gpmc,sync-clk-ps", &gpmc_t->sync_clk);

        /* chip select timings */
        of_property_read_u32(np, "gpmc,cs-on-ns", &gpmc_t->cs_on);
        of_property_read_u32(np, "gpmc,cs-rd-off-ns", &gpmc_t->cs_rd_off);
        of_property_read_u32(np, "gpmc,cs-wr-off-ns", &gpmc_t->cs_wr_off);

        /* ADV signal timings */
        of_property_read_u32(np, "gpmc,adv-on-ns", &gpmc_t->adv_on);
        of_property_read_u32(np, "gpmc,adv-rd-off-ns", &gpmc_t->adv_rd_off);
        of_property_read_u32(np, "gpmc,adv-wr-off-ns", &gpmc_t->adv_wr_off);

        /* WE signal timings */
        of_property_read_u32(np, "gpmc,we-on-ns", &gpmc_t->we_on);
        of_property_read_u32(np, "gpmc,we-off-ns", &gpmc_t->we_off);

        /* OE signal timings */
        of_property_read_u32(np, "gpmc,oe-on-ns", &gpmc_t->oe_on);
        of_property_read_u32(np, "gpmc,oe-off-ns", &gpmc_t->oe_off);

        /* access and cycle timings */
        of_property_read_u32(np, "gpmc,page-burst-access-ns",
                             &gpmc_t->page_burst_access);
        of_property_read_u32(np, "gpmc,access-ns", &gpmc_t->access);
        of_property_read_u32(np, "gpmc,rd-cycle-ns", &gpmc_t->rd_cycle);
        of_property_read_u32(np, "gpmc,wr-cycle-ns", &gpmc_t->wr_cycle);
        of_property_read_u32(np, "gpmc,bus-turnaround-ns",
                             &gpmc_t->bus_turnaround);
        of_property_read_u32(np, "gpmc,cycle2cycle-delay-ns",
                             &gpmc_t->cycle2cycle_delay);
        of_property_read_u32(np, "gpmc,wait-monitoring-ns",
                             &gpmc_t->wait_monitoring);
        of_property_read_u32(np, "gpmc,clk-activation-ns",
                             &gpmc_t->clk_activation);

        /* only applicable to OMAP3+ */
        of_property_read_u32(np, "gpmc,wr-access-ns", &gpmc_t->wr_access);
        of_property_read_u32(np, "gpmc,wr-data-mux-bus-ns",
                             &gpmc_t->wr_data_mux_bus);

        /* bool timing parameters */
        p = &gpmc_t->bool_timings;

        p->cycle2cyclediffcsen =
                of_property_read_bool(np, "gpmc,cycle2cycle-diffcsen");
        p->cycle2cyclesamecsen =
                of_property_read_bool(np, "gpmc,cycle2cycle-samecsen");
        p->we_extra_delay = of_property_read_bool(np, "gpmc,we-extra-delay");
        p->oe_extra_delay = of_property_read_bool(np, "gpmc,oe-extra-delay");
        p->adv_extra_delay = of_property_read_bool(np, "gpmc,adv-extra-delay");
        p->cs_extra_delay = of_property_read_bool(np, "gpmc,cs-extra-delay");
        p->time_para_granularity =
                of_property_read_bool(np, "gpmc,time-para-granularity");
}
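
/*
 * Illustrative device-tree fragment (placeholder values, subset of the
 * recognised properties) showing the child-node bindings consumed by
 * gpmc_read_settings_dt() and gpmc_read_timings_dt() above. The node name,
 * unit address and all numbers are examples only, not recommendations:
 *
 *      nor@0,0 {
 *              reg = <0 0 0x1000000>;
 *              bank-width = <2>;
 *              gpmc,sync-read;
 *              gpmc,burst-length = <16>;
 *              gpmc,burst-read;
 *              gpmc,wait-pin = <0>;
 *              gpmc,wait-on-read;
 *              gpmc,sync-clk-ps = <0>;
 *              gpmc,cs-on-ns = <0>;
 *              gpmc,cs-rd-off-ns = <120>;
 *              gpmc,access-ns = <90>;
 *              gpmc,rd-cycle-ns = <120>;
 *      };
 */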
#if IS_ENABLED(CONFIG_MTD_NAND)

static const char * const nand_xfer_types[] = {
        [NAND_OMAP_PREFETCH_POLLED]     = "prefetch-polled",
        [NAND_OMAP_POLLED]              = "polled",
        [NAND_OMAP_PREFETCH_DMA]        = "prefetch-dma",
        [NAND_OMAP_PREFETCH_IRQ]        = "prefetch-irq",
};

static int gpmc_probe_nand_child(struct platform_device *pdev,
                                 struct device_node *child)
{
        u32 val;
        const char *s;
        struct gpmc_timings gpmc_t;
        struct omap_nand_platform_data *gpmc_nand_data;

        if (of_property_read_u32(child, "reg", &val) < 0) {
                dev_err(&pdev->dev, "%s has no 'reg' property\n",
                        child->full_name);
                return -ENODEV;
        }

        gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data),
                                      GFP_KERNEL);
        if (!gpmc_nand_data)
                return -ENOMEM;

        gpmc_nand_data->cs = val;
        gpmc_nand_data->of_node = child;

        /* Detect availability of ELM module */
        gpmc_nand_data->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
        if (gpmc_nand_data->elm_of_node == NULL)
                gpmc_nand_data->elm_of_node =
                                        of_parse_phandle(child, "elm_id", 0);

        /* select ecc-scheme for NAND */
        if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
                pr_err("%s: ti,nand-ecc-opt not found\n", __func__);
                return -ENODEV;
        }

        if (!strcmp(s, "sw")) {
                gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
        } else if (!strcmp(s, "ham1") ||
                   !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
                gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_HW;
        } else if (!strcmp(s, "bch4")) {
                if (gpmc_nand_data->elm_of_node)
                        gpmc_nand_data->ecc_opt =
                                OMAP_ECC_BCH4_CODE_HW;
                else
                        gpmc_nand_data->ecc_opt =
                                OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
        } else if (!strcmp(s, "bch8")) {
                if (gpmc_nand_data->elm_of_node)
                        gpmc_nand_data->ecc_opt =
                                OMAP_ECC_BCH8_CODE_HW;
                else
                        gpmc_nand_data->ecc_opt =
                                OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
        } else if (!strcmp(s, "bch16")) {
                if (gpmc_nand_data->elm_of_node)
                        gpmc_nand_data->ecc_opt =
                                OMAP_ECC_BCH16_CODE_HW;
                else
                        pr_err("%s: BCH16 requires ELM support\n", __func__);
        } else {
                pr_err("%s: ti,nand-ecc-opt invalid value\n", __func__);
        }

        /* select data transfer mode for NAND controller */
        if (!of_property_read_string(child, "ti,nand-xfer-type", &s))
                for (val = 0; val < ARRAY_SIZE(nand_xfer_types); val++)
                        if (!strcasecmp(s, nand_xfer_types[val])) {
                                gpmc_nand_data->xfer_type = val;
                                break;
                        }

        gpmc_nand_data->flash_bbt = of_get_nand_on_flash_bbt(child);

        val = of_get_nand_bus_width(child);
        if (val == 16)
                gpmc_nand_data->devsize = NAND_BUSWIDTH_16;

        gpmc_read_timings_dt(child, &gpmc_t);
        gpmc_nand_init(gpmc_nand_data, &gpmc_t);

        return 0;
}
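
/*
 * Illustrative NAND child-node fragment (placeholder values only) for the
 * properties parsed above. "ti,nand-ecc-opt" accepts "sw", "hw",
 * "hw-romcode", "ham1", "bch4", "bch8" or "bch16"; "ti,nand-xfer-type"
 * accepts one of the nand_xfer_types[] strings; "&elm" stands for whatever
 * ELM node the board actually provides:
 *
 *      nand@0,0 {
 *              reg = <0 0 4>;
 *              ti,nand-ecc-opt = "bch8";
 *              ti,nand-xfer-type = "prefetch-dma";
 *              ti,elm-id = <&elm>;
 *              nand-bus-width = <16>;
 *      };
 */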
#else
static int gpmc_probe_nand_child(struct platform_device *pdev,
                                 struct device_node *child)
{
        return 0;
}
#endif

#if IS_ENABLED(CONFIG_MTD_ONENAND)
static int gpmc_probe_onenand_child(struct platform_device *pdev,
                                    struct device_node *child)
{
        u32 val;
        struct omap_onenand_platform_data *gpmc_onenand_data;

        if (of_property_read_u32(child, "reg", &val) < 0) {
                dev_err(&pdev->dev, "%s has no 'reg' property\n",
                        child->full_name);
                return -ENODEV;
        }

        gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
                                         GFP_KERNEL);
        if (!gpmc_onenand_data)
                return -ENOMEM;

        gpmc_onenand_data->cs = val;
        gpmc_onenand_data->of_node = child;
        gpmc_onenand_data->dma_channel = -1;

        if (!of_property_read_u32(child, "dma-channel", &val))
                gpmc_onenand_data->dma_channel = val;

        gpmc_onenand_init(gpmc_onenand_data);

        return 0;
}
#else
static int gpmc_probe_onenand_child(struct platform_device *pdev,
                                    struct device_node *child)
{
        return 0;
}
#endif

/**
 * gpmc_probe_generic_child - configures the gpmc for a child device
 * @pdev: pointer to gpmc platform device
 * @child: pointer to device-tree node for child device
 *
 * Allocates and configures a GPMC chip-select for a child device.
 * Returns 0 on success and appropriate negative error code on failure.
 */
static int gpmc_probe_generic_child(struct platform_device *pdev,
                                    struct device_node *child)
{
        struct gpmc_settings gpmc_s;
        struct gpmc_timings gpmc_t;
        struct resource res;
        unsigned long base;
        const char *name;
        int ret, cs;
        u32 val;

        if (of_property_read_u32(child, "reg", &cs) < 0) {
                dev_err(&pdev->dev, "%s has no 'reg' property\n",
                        child->full_name);
                return -ENODEV;
        }

        if (of_address_to_resource(child, 0, &res) < 0) {
                dev_err(&pdev->dev, "%s has malformed 'reg' property\n",
                        child->full_name);
                return -ENODEV;
        }

        /*
         * Check if we have multiple instances of the same device
         * on a single chip select. If so, use the already initialized
         * timings.
         */
        name = gpmc_cs_get_name(cs);
        if (name && child->name && of_node_cmp(child->name, name) == 0)
                goto no_timings;

        ret = gpmc_cs_request(cs, resource_size(&res), &base);
        if (ret < 0) {
                dev_err(&pdev->dev, "cannot request GPMC CS %d\n", cs);
                return ret;
        }
        gpmc_cs_set_name(cs, child->name);

        gpmc_read_settings_dt(child, &gpmc_s);
        gpmc_read_timings_dt(child, &gpmc_t);

        /*
         * For some GPMC devices we still need to rely on the bootloader
         * timings because the devices can be connected via FPGA.
         * REVISIT: Add timing support from slls644g.pdf.
         */
        if (!gpmc_t.cs_rd_off) {
                WARN(1, "enable GPMC debug to configure .dts timings for CS%i\n",
                     cs);
                gpmc_cs_show_timings(cs,
                                     "please add GPMC bootloader timings to .dts");
                goto no_timings;
        }

        /* CS must be disabled while making changes to gpmc configuration */
        gpmc_cs_disable_mem(cs);
        /*
         * FIXME: gpmc_cs_request() will map the CS to an arbitrary
         * location in the gpmc address space. When booting with
         * device-tree we want the NOR flash to be mapped to the
         * location specified in the device-tree blob. So remap the
         * CS to this location. Once DT migration is complete should
         * just make gpmc_cs_request() map a specific address.
         */
        ret = gpmc_cs_remap(cs, res.start);
        if (ret < 0) {
                dev_err(&pdev->dev, "cannot remap GPMC CS %d to %pa\n",
                        cs, &res.start);
                goto err;
        }

        ret = of_property_read_u32(child, "bank-width", &gpmc_s.device_width);
        if (ret < 0)
                goto err;

        ret = gpmc_cs_program_settings(cs, &gpmc_s);
        if (ret < 0)
                goto err;

        ret = gpmc_cs_set_timings(cs, &gpmc_t, &gpmc_s);
        if (ret) {
                dev_err(&pdev->dev, "failed to set gpmc timings for: %s\n",
                        child->name);
                goto err;
        }

        /* Clear limited address i.e. enable A26-A11 */
        val = gpmc_read_reg(GPMC_CONFIG);
        val &= ~GPMC_CONFIG_LIMITEDADDRESS;
        gpmc_write_reg(GPMC_CONFIG, val);

        /* Enable CS region */
        gpmc_cs_enable_mem(cs);

no_timings:

        /* create platform device, NULL on error or when disabled */
        if (!of_platform_device_create(child, NULL, &pdev->dev))
                goto err_child_fail;

        /* is child a common bus? */
        if (of_match_node(of_default_bus_match_table, child))
                /* create children and other common bus children */
                if (of_platform_populate(child, of_default_bus_match_table,
                                         NULL, &pdev->dev))
                        goto err_child_fail;

        return 0;

err_child_fail:

        dev_err(&pdev->dev, "failed to create gpmc child %s\n", child->name);
        ret = -ENODEV;

err:
        gpmc_cs_free(cs);

        return ret;
}
static int gpmc_probe_dt(struct platform_device *pdev)
{
        int ret;
        struct device_node *child;
        const struct of_device_id *of_id =
                of_match_device(gpmc_dt_ids, &pdev->dev);

        if (!of_id)
                return 0;

        ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-cs",
                                   &gpmc_cs_num);
        if (ret < 0) {
                pr_err("%s: number of chip-selects not defined\n", __func__);
                return ret;
        } else if (gpmc_cs_num < 1) {
                pr_err("%s: all chip-selects are disabled\n", __func__);
                return -EINVAL;
        } else if (gpmc_cs_num > GPMC_CS_NUM) {
                pr_err("%s: number of supported chip-selects cannot be > %d\n",
                       __func__, GPMC_CS_NUM);
                return -EINVAL;
        }

        ret = of_property_read_u32(pdev->dev.of_node, "gpmc,num-waitpins",
                                   &gpmc_nr_waitpins);
        if (ret < 0) {
                pr_err("%s: number of wait pins not found!\n", __func__);
                return ret;
        }

        for_each_available_child_of_node(pdev->dev.of_node, child) {

                if (!child->name)
                        continue;

                if (of_node_cmp(child->name, "nand") == 0)
                        ret = gpmc_probe_nand_child(pdev, child);
                else if (of_node_cmp(child->name, "onenand") == 0)
                        ret = gpmc_probe_onenand_child(pdev, child);
                else if (of_node_cmp(child->name, "ethernet") == 0 ||
                         of_node_cmp(child->name, "nor") == 0 ||
                         of_node_cmp(child->name, "uart") == 0)
                        ret = gpmc_probe_generic_child(pdev, child);

                if (WARN(ret < 0, "%s: probing gpmc child %s failed\n",
                         __func__, child->full_name))
                        of_node_put(child);
        }

        return 0;
}
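
/*
 * Illustrative controller-node fragment (placeholder values) for the
 * properties parsed above; the child node name ("nand", "onenand",
 * "ethernet", "nor" or "uart") selects which gpmc_probe_*_child() helper
 * runs for it. The node shape and numbers are examples only:
 *
 *      gpmc {
 *              gpmc,num-cs = <8>;
 *              gpmc,num-waitpins = <2>;
 *
 *              nor@0,0 {
 *                      reg = <0 0 0x1000000>;
 *                      bank-width = <2>;
 *              };
 *      };
 */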
#else
static int gpmc_probe_dt(struct platform_device *pdev)
{
        return 0;
}
#endif
static int gpmc_probe(struct platform_device *pdev)
{
        int rc;
        u32 l;
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL)
                return -ENOENT;

        phys_base = res->start;
        mem_size = resource_size(res);

        gpmc_base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(gpmc_base))
                return PTR_ERR(gpmc_base);

        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (res == NULL)
                dev_warn(&pdev->dev, "Failed to get resource: irq\n");
        else
                gpmc_irq = res->start;

        gpmc_l3_clk = devm_clk_get(&pdev->dev, "fck");
        if (IS_ERR(gpmc_l3_clk)) {
                dev_err(&pdev->dev, "Failed to get GPMC fck\n");
                gpmc_irq = 0;
                return PTR_ERR(gpmc_l3_clk);
        }

        if (!clk_get_rate(gpmc_l3_clk)) {
                dev_err(&pdev->dev, "Invalid GPMC fck clock rate\n");
                return -EINVAL;
        }

        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);

        gpmc_dev = &pdev->dev;

        l = gpmc_read_reg(GPMC_REVISION);

        /*
         * FIXME: Once device-tree migration is complete the below flags
         * should be populated based upon the device-tree compatible
         * string. For now just use the IP revision. OMAP3+ devices have
         * the wr_access and wr_data_mux_bus register fields. OMAP4+
         * devices support the addr-addr-data multiplex protocol.
         *
         * GPMC IP revisions:
         * - OMAP24xx                   = 2.0
         * - OMAP3xxx                   = 5.0
         * - OMAP44xx/54xx/AM335x       = 6.0
         */
        if (GPMC_REVISION_MAJOR(l) > 0x4)
                gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS;
        if (GPMC_REVISION_MAJOR(l) > 0x5)
                gpmc_capability |= GPMC_HAS_MUX_AAD;
        dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
                 GPMC_REVISION_MINOR(l));

        gpmc_mem_init();

        if (gpmc_setup_irq() < 0)
                dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");

        if (!pdev->dev.of_node) {
                gpmc_cs_num = GPMC_CS_NUM;
                gpmc_nr_waitpins = GPMC_NR_WAITPINS;
        }

        rc = gpmc_probe_dt(pdev);
        if (rc < 0) {
                pm_runtime_put_sync(&pdev->dev);
                dev_err(gpmc_dev, "failed to probe DT parameters\n");
                return rc;
        }

        return 0;
}
static int gpmc_remove(struct platform_device *pdev)
{
        gpmc_free_irq();
        gpmc_mem_exit();
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        gpmc_dev = NULL;
        return 0;
}

#ifdef CONFIG_PM_SLEEP
static int gpmc_suspend(struct device *dev)
{
        omap3_gpmc_save_context();
        pm_runtime_put_sync(dev);
        return 0;
}

static int gpmc_resume(struct device *dev)
{
        pm_runtime_get_sync(dev);
        omap3_gpmc_restore_context();
        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(gpmc_pm_ops, gpmc_suspend, gpmc_resume);

static struct platform_driver gpmc_driver = {
        .probe          = gpmc_probe,
        .remove         = gpmc_remove,
        .driver         = {
                .name   = DEVICE_NAME,
                .of_match_table = of_match_ptr(gpmc_dt_ids),
                .pm     = &gpmc_pm_ops,
        },
};

static __init int gpmc_init(void)
{
        return platform_driver_register(&gpmc_driver);
}

static __exit void gpmc_exit(void)
{
        platform_driver_unregister(&gpmc_driver);
}

postcore_initcall(gpmc_init);
module_exit(gpmc_exit);
static irqreturn_t gpmc_handle_irq(int irq, void *dev)
{
        int i;
        u32 regval;

        regval = gpmc_read_reg(GPMC_IRQSTATUS);

        if (!regval)
                return IRQ_NONE;

        for (i = 0; i < GPMC_NR_IRQ; i++)
                if (regval & gpmc_client_irq[i].bitmask)
                        generic_handle_irq(gpmc_client_irq[i].irq);

        gpmc_write_reg(GPMC_IRQSTATUS, regval);

        return IRQ_HANDLED;
}
static struct omap3_gpmc_regs gpmc_context;

void omap3_gpmc_save_context(void)
{
        int i;

        gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
        gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
        gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
        gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
        gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
        gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
        gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);

        for (i = 0; i < gpmc_cs_num; i++) {
                gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
                if (gpmc_context.cs_context[i].is_valid) {
                        gpmc_context.cs_context[i].config1 =
                                gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
                        gpmc_context.cs_context[i].config2 =
                                gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
                        gpmc_context.cs_context[i].config3 =
                                gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
                        gpmc_context.cs_context[i].config4 =
                                gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
                        gpmc_context.cs_context[i].config5 =
                                gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
                        gpmc_context.cs_context[i].config6 =
                                gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
                        gpmc_context.cs_context[i].config7 =
                                gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
                }
        }
}

void omap3_gpmc_restore_context(void)
{
        int i;

        gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
        gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
        gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
        gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
        gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
        gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
        gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);

        for (i = 0; i < gpmc_cs_num; i++) {
                if (gpmc_context.cs_context[i].is_valid) {
                        gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
                                          gpmc_context.cs_context[i].config1);
                        gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
                                          gpmc_context.cs_context[i].config2);
                        gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
                                          gpmc_context.cs_context[i].config3);
                        gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
                                          gpmc_context.cs_context[i].config4);
                        gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
                                          gpmc_context.cs_context[i].config5);
                        gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
                                          gpmc_context.cs_context[i].config6);
                        gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
                                          gpmc_context.cs_context[i].config7);
                }
        }
}