parport_ip32.c

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /* Low-level parallel port routines for built-in port on SGI IP32
  3. *
  4. * Author: Arnaud Giersch <arnaud.giersch@free.fr>
  5. *
  6. * Based on parport_pc.c by
  7. * Phil Blundell, Tim Waugh, Jose Renau, David Campbell,
  8. * Andrea Arcangeli, et al.
  9. *
  10. * Thanks to Ilya A. Volynets-Evenbakh for his help.
  11. *
  12. * Copyright (C) 2005, 2006 Arnaud Giersch.
  13. */
  14. /* Current status:
  15. *
  16. * Basic SPP and PS2 modes are supported.
  17. * Support for parallel port IRQ is present.
  18. * Hardware SPP (a.k.a. compatibility), EPP, and ECP modes are
  19. * supported.
  20. * SPP/ECP FIFO can be driven in PIO or DMA mode. PIO mode can work with
  21. * or without interrupt support.
  22. *
  23. * Hardware ECP mode is not fully implemented (ecp_read_data and
  24. * ecp_write_addr are actually missing).
  25. *
  26. * To do:
  27. *
  28. * Fully implement ECP mode.
  29. * EPP and ECP modes need to be tested. I currently do not own any
  30. * peripheral supporting these extended modes, and cannot test them.
  31. * If DMA mode works well, decide if support for PIO FIFO modes should be
  32. * dropped.
  33. * Use the io{read,write} family of functions when they become available in
  34. * the linux-mips.org tree. Note: the MIPS-specific functions readsb()
  35. * and writesb() are to be replaced by ioread8_rep() and iowrite8_rep()
  36. * respectively.
  37. */
  38. /* The built-in parallel port on the SGI O2 workstation (a.k.a. IP32) is an
  39. * IEEE 1284 parallel port driven by a Texas Instruments TL16PIR552PH chip[1].
  40. * This chip supports SPP, bidirectional, EPP and ECP modes. It has a 16 byte
  41. * FIFO buffer and supports DMA transfers.
  42. *
  43. * [1] http://focus.ti.com/docs/prod/folders/print/tl16pir552.html
  44. *
  45. * Theoretically, we could simply use the parport_pc module. It is however
  46. * not so simple. The parport_pc code assumes that the parallel port
  47. * registers are port-mapped. On the O2, they are memory-mapped.
  48. * Furthermore, each register is replicated on 256 consecutive addresses (as
  49. * it is for the built-in serial ports on the same chip).
  50. */
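/*
 * Illustration (a sketch, not part of the driver; "base", "i" and "reg_addr"
 * are made-up names): since each register is replicated on 256 consecutive
 * addresses, the individual registers can be reached from a single
 * ioremap()ed base address in 256-byte steps:
 *
 *	void __iomem *reg_addr(void __iomem *base, unsigned int i)
 *	{
 *		return base + (i << 8);	// 256 bytes per register
 *	}
 *
 * The driver keeps the resulting addresses in &struct parport_ip32_regs
 * defined below.
 */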
  51. /*--- Some configuration defines ---------------------------------------*/
  52. /* DEBUG_PARPORT_IP32
  53. * 0 disable debug
  54. * 1 standard level: pr_debug1 is enabled
  55. * 2 parport_ip32_dump_state is enabled
  56. * >=3 verbose level: pr_debug is enabled
  57. */
  58. #if !defined(DEBUG_PARPORT_IP32)
  59. # define DEBUG_PARPORT_IP32 0 /* 0 (disabled) for production */
  60. #endif
  61. /*----------------------------------------------------------------------*/
  62. /* Setup DEBUG macros. This is done before any includes, just in case we
  63. * activate pr_debug() with DEBUG_PARPORT_IP32 >= 3.
  64. */
  65. #if DEBUG_PARPORT_IP32 == 1
  66. # warning DEBUG_PARPORT_IP32 == 1
  67. #elif DEBUG_PARPORT_IP32 == 2
  68. # warning DEBUG_PARPORT_IP32 == 2
  69. #elif DEBUG_PARPORT_IP32 >= 3
  70. # warning DEBUG_PARPORT_IP32 >= 3
  71. # if !defined(DEBUG)
  72. # define DEBUG /* enable pr_debug() in kernel.h */
  73. # endif
  74. #endif
  75. #include <linux/completion.h>
  76. #include <linux/delay.h>
  77. #include <linux/dma-mapping.h>
  78. #include <linux/err.h>
  79. #include <linux/init.h>
  80. #include <linux/interrupt.h>
  81. #include <linux/jiffies.h>
  82. #include <linux/kernel.h>
  83. #include <linux/module.h>
  84. #include <linux/parport.h>
  85. #include <linux/sched/signal.h>
  86. #include <linux/slab.h>
  87. #include <linux/spinlock.h>
  88. #include <linux/stddef.h>
  89. #include <linux/types.h>
  90. #include <asm/io.h>
  91. #include <asm/ip32/ip32_ints.h>
  92. #include <asm/ip32/mace.h>
  93. /*--- Global variables -------------------------------------------------*/
  94. /* Verbose probing on by default for debugging. */
  95. #if DEBUG_PARPORT_IP32 >= 1
  96. # define DEFAULT_VERBOSE_PROBING 1
  97. #else
  98. # define DEFAULT_VERBOSE_PROBING 0
  99. #endif
  100. /* Default prefix for printk */
  101. #define PPIP32 "parport_ip32: "
  102. /*
  103. * These are the module parameters:
  104. * @features: bit mask of features to enable/disable
  105. * (all enabled by default)
  106. * @verbose_probing: log chit-chat during initialization
  107. */
  108. #define PARPORT_IP32_ENABLE_IRQ (1U << 0)
  109. #define PARPORT_IP32_ENABLE_DMA (1U << 1)
  110. #define PARPORT_IP32_ENABLE_SPP (1U << 2)
  111. #define PARPORT_IP32_ENABLE_EPP (1U << 3)
  112. #define PARPORT_IP32_ENABLE_ECP (1U << 4)
  113. static unsigned int features = ~0U;
  114. static bool verbose_probing = DEFAULT_VERBOSE_PROBING;
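/*
 * Usage sketch (assuming the module_param() declarations later in this file
 * expose the two variables above under these names):
 *
 *	modprobe parport_ip32 features=0x03 verbose_probing=1
 *
 * would keep only IRQ (bit 0) and DMA (bit 1) support enabled and turn on
 * verbose probing.
 */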
  115. /* We do not support more than one port. */
  116. static struct parport *this_port;
  117. /* Timing constants for FIFO modes. */
  118. #define FIFO_NFAULT_TIMEOUT 100 /* milliseconds */
  119. #define FIFO_POLLING_INTERVAL 50 /* microseconds */
  120. /*--- I/O register definitions -----------------------------------------*/
  121. /**
  122. * struct parport_ip32_regs - virtual addresses of parallel port registers
  123. * @data: Data Register
  124. * @dsr: Device Status Register
  125. * @dcr: Device Control Register
  126. * @eppAddr: EPP Address Register
  127. * @eppData0: EPP Data Register 0
  128. * @eppData1: EPP Data Register 1
  129. * @eppData2: EPP Data Register 2
  130. * @eppData3: EPP Data Register 3
  131. * @ecpAFifo: ECP Address FIFO
  132. * @fifo: General FIFO register. The same address is used for:
  133. * - cFifo, the Parallel Port DATA FIFO
  134. * - ecpDFifo, the ECP Data FIFO
  135. * - tFifo, the ECP Test FIFO
  136. * @cnfgA: Configuration Register A
  137. * @cnfgB: Configuration Register B
  138. * @ecr: Extended Control Register
  139. */
  140. struct parport_ip32_regs {
  141. void __iomem *data;
  142. void __iomem *dsr;
  143. void __iomem *dcr;
  144. void __iomem *eppAddr;
  145. void __iomem *eppData0;
  146. void __iomem *eppData1;
  147. void __iomem *eppData2;
  148. void __iomem *eppData3;
  149. void __iomem *ecpAFifo;
  150. void __iomem *fifo;
  151. void __iomem *cnfgA;
  152. void __iomem *cnfgB;
  153. void __iomem *ecr;
  154. };
  155. /* Device Status Register */
  156. #define DSR_nBUSY (1U << 7) /* PARPORT_STATUS_BUSY */
  157. #define DSR_nACK (1U << 6) /* PARPORT_STATUS_ACK */
  158. #define DSR_PERROR (1U << 5) /* PARPORT_STATUS_PAPEROUT */
  159. #define DSR_SELECT (1U << 4) /* PARPORT_STATUS_SELECT */
  160. #define DSR_nFAULT (1U << 3) /* PARPORT_STATUS_ERROR */
  161. #define DSR_nPRINT (1U << 2) /* specific to TL16PIR552 */
  162. /* #define DSR_reserved (1U << 1) */
  163. #define DSR_TIMEOUT (1U << 0) /* EPP timeout */
  164. /* Device Control Register */
  165. /* #define DCR_reserved (1U << 7) | (1U << 6) */
  166. #define DCR_DIR (1U << 5) /* direction */
  167. #define DCR_IRQ (1U << 4) /* interrupt on nAck */
  168. #define DCR_SELECT (1U << 3) /* PARPORT_CONTROL_SELECT */
  169. #define DCR_nINIT (1U << 2) /* PARPORT_CONTROL_INIT */
  170. #define DCR_AUTOFD (1U << 1) /* PARPORT_CONTROL_AUTOFD */
  171. #define DCR_STROBE (1U << 0) /* PARPORT_CONTROL_STROBE */
  172. /* ECP Configuration Register A */
  173. #define CNFGA_IRQ (1U << 7)
  174. #define CNFGA_ID_MASK ((1U << 6) | (1U << 5) | (1U << 4))
  175. #define CNFGA_ID_SHIFT 4
  176. #define CNFGA_ID_16 (00U << CNFGA_ID_SHIFT)
  177. #define CNFGA_ID_8 (01U << CNFGA_ID_SHIFT)
  178. #define CNFGA_ID_32 (02U << CNFGA_ID_SHIFT)
  179. /* #define CNFGA_reserved (1U << 3) */
  180. #define CNFGA_nBYTEINTRANS (1U << 2)
  181. #define CNFGA_PWORDLEFT ((1U << 1) | (1U << 0))
  182. /* ECP Configuration Register B */
  183. #define CNFGB_COMPRESS (1U << 7)
  184. #define CNFGB_INTRVAL (1U << 6)
  185. #define CNFGB_IRQ_MASK ((1U << 5) | (1U << 4) | (1U << 3))
  186. #define CNFGB_IRQ_SHIFT 3
  187. #define CNFGB_DMA_MASK ((1U << 2) | (1U << 1) | (1U << 0))
  188. #define CNFGB_DMA_SHIFT 0
  189. /* Extended Control Register */
  190. #define ECR_MODE_MASK ((1U << 7) | (1U << 6) | (1U << 5))
  191. #define ECR_MODE_SHIFT 5
  192. #define ECR_MODE_SPP (00U << ECR_MODE_SHIFT)
  193. #define ECR_MODE_PS2 (01U << ECR_MODE_SHIFT)
  194. #define ECR_MODE_PPF (02U << ECR_MODE_SHIFT)
  195. #define ECR_MODE_ECP (03U << ECR_MODE_SHIFT)
  196. #define ECR_MODE_EPP (04U << ECR_MODE_SHIFT)
  197. /* #define ECR_MODE_reserved (05U << ECR_MODE_SHIFT) */
  198. #define ECR_MODE_TST (06U << ECR_MODE_SHIFT)
  199. #define ECR_MODE_CFG (07U << ECR_MODE_SHIFT)
  200. #define ECR_nERRINTR (1U << 4)
  201. #define ECR_DMAEN (1U << 3)
  202. #define ECR_SERVINTR (1U << 2)
  203. #define ECR_F_FULL (1U << 1)
  204. #define ECR_F_EMPTY (1U << 0)
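/*
 * Example (a sketch; "priv" is a &struct parport_ip32_private as defined
 * below): selecting PS2 mode with the error and service interrupts disabled
 * (both bits disable their interrupt when written as 1) amounts to:
 *
 *	writeb(ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR, priv->regs.ecr);
 *
 * which is what parport_ip32_set_mode() does further down.
 */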
  205. /*--- Private data -----------------------------------------------------*/
  206. /**
  207. * enum parport_ip32_irq_mode - operation mode of interrupt handler
  208. * @PARPORT_IP32_IRQ_FWD: forward interrupt to the upper parport layer
  209. * @PARPORT_IP32_IRQ_HERE: interrupt is handled locally
  210. */
  211. enum parport_ip32_irq_mode { PARPORT_IP32_IRQ_FWD, PARPORT_IP32_IRQ_HERE };
  212. /**
  213. * struct parport_ip32_private - private stuff for &struct parport
  214. * @regs: register addresses
  215. * @dcr_cache: cached contents of DCR
  216. * @dcr_writable: bit mask of writable DCR bits
  217. * @pword: number of bytes per PWord
  218. * @fifo_depth: number of PWords that FIFO will hold
  219. * @readIntrThreshold: minimum number of PWords we can read
  220. * if we get an interrupt
  221. * @writeIntrThreshold: minimum number of PWords we can write
  222. * if we get an interrupt
  223. * @irq_mode: operation mode of interrupt handler for this port
  224. * @irq_complete: mutex used to wait for an interrupt to occur
  225. */
  226. struct parport_ip32_private {
  227. struct parport_ip32_regs regs;
  228. unsigned int dcr_cache;
  229. unsigned int dcr_writable;
  230. unsigned int pword;
  231. unsigned int fifo_depth;
  232. unsigned int readIntrThreshold;
  233. unsigned int writeIntrThreshold;
  234. enum parport_ip32_irq_mode irq_mode;
  235. struct completion irq_complete;
  236. };
  237. /*--- Debug code -------------------------------------------------------*/
  238. /*
  239. * pr_debug1 - print debug messages
  240. *
  241. * This is like pr_debug(), but is defined for %DEBUG_PARPORT_IP32 >= 1
  242. */
  243. #if DEBUG_PARPORT_IP32 >= 1
  244. # define pr_debug1(...) printk(KERN_DEBUG __VA_ARGS__)
  245. #else /* DEBUG_PARPORT_IP32 < 1 */
  246. # define pr_debug1(...) do { } while (0)
  247. #endif
  248. /*
  249. * pr_trace, pr_trace1 - trace function calls
  250. * @p: pointer to &struct parport
  251. * @fmt: printk format string
  252. * @...: parameters for format string
  253. *
  254. * Macros used to trace function calls. The given string is formatted after
  255. * the function name. pr_trace() uses pr_debug(), and pr_trace1() uses
  256. * pr_debug1(). __pr_trace() is the low-level macro and is not to be used
  257. * directly.
  258. */
  259. #define __pr_trace(pr, p, fmt, ...) \
  260. pr("%s: %s" fmt "\n", \
  261. ({ const struct parport *__p = (p); \
  262. __p ? __p->name : "parport_ip32"; }), \
  263. __func__ , ##__VA_ARGS__)
  264. #define pr_trace(p, fmt, ...) __pr_trace(pr_debug, p, fmt , ##__VA_ARGS__)
  265. #define pr_trace1(p, fmt, ...) __pr_trace(pr_debug1, p, fmt , ##__VA_ARGS__)
  266. /*
  267. * __pr_probe, pr_probe - print message if @verbose_probing is true
  268. * @p: pointer to &struct parport
  269. * @fmt: printk format string
  270. * @...: parameters for format string
  271. *
  272. * For new lines, use pr_probe(). Use __pr_probe() for continued lines.
  273. */
  274. #define __pr_probe(...) \
  275. do { if (verbose_probing) printk(__VA_ARGS__); } while (0)
  276. #define pr_probe(p, fmt, ...) \
  277. __pr_probe(KERN_INFO PPIP32 "0x%lx: " fmt, (p)->base , ##__VA_ARGS__)
  278. /*
  279. * parport_ip32_dump_state - print register status of parport
  280. * @p: pointer to &struct parport
  281. * @str: string to add in message
  282. * @show_ecp_config: shall we dump ECP configuration registers too?
  283. *
  284. * This function is only here for debugging purposes, and should be used with
  285. * care. Reading the parallel port registers may have undesired side effects.
  286. * Especially if @show_ecp_config is true, the parallel port is reset.
  287. * This function is only defined if %DEBUG_PARPORT_IP32 >= 2.
  288. */
  289. #if DEBUG_PARPORT_IP32 >= 2
  290. static void parport_ip32_dump_state(struct parport *p, char *str,
  291. unsigned int show_ecp_config)
  292. {
  293. struct parport_ip32_private * const priv = p->physport->private_data;
  294. unsigned int i;
  295. printk(KERN_DEBUG PPIP32 "%s: state (%s):\n", p->name, str);
  296. {
  297. static const char ecr_modes[8][4] = {"SPP", "PS2", "PPF",
  298. "ECP", "EPP", "???",
  299. "TST", "CFG"};
  300. unsigned int ecr = readb(priv->regs.ecr);
  301. printk(KERN_DEBUG PPIP32 " ecr=0x%02x", ecr);
  302. printk(" %s",
  303. ecr_modes[(ecr & ECR_MODE_MASK) >> ECR_MODE_SHIFT]);
  304. if (ecr & ECR_nERRINTR)
  305. printk(",nErrIntrEn");
  306. if (ecr & ECR_DMAEN)
  307. printk(",dmaEn");
  308. if (ecr & ECR_SERVINTR)
  309. printk(",serviceIntr");
  310. if (ecr & ECR_F_FULL)
  311. printk(",f_full");
  312. if (ecr & ECR_F_EMPTY)
  313. printk(",f_empty");
  314. printk("\n");
  315. }
  316. if (show_ecp_config) {
  317. unsigned int oecr, cnfgA, cnfgB;
  318. oecr = readb(priv->regs.ecr);
  319. writeb(ECR_MODE_PS2, priv->regs.ecr);
  320. writeb(ECR_MODE_CFG, priv->regs.ecr);
  321. cnfgA = readb(priv->regs.cnfgA);
  322. cnfgB = readb(priv->regs.cnfgB);
  323. writeb(ECR_MODE_PS2, priv->regs.ecr);
  324. writeb(oecr, priv->regs.ecr);
  325. printk(KERN_DEBUG PPIP32 " cnfgA=0x%02x", cnfgA);
  326. printk(" ISA-%s", (cnfgA & CNFGA_IRQ) ? "Level" : "Pulses");
  327. switch (cnfgA & CNFGA_ID_MASK) {
  328. case CNFGA_ID_8:
  329. printk(",8 bits");
  330. break;
  331. case CNFGA_ID_16:
  332. printk(",16 bits");
  333. break;
  334. case CNFGA_ID_32:
  335. printk(",32 bits");
  336. break;
  337. default:
  338. printk(",unknown ID");
  339. break;
  340. }
  341. if (!(cnfgA & CNFGA_nBYTEINTRANS))
  342. printk(",ByteInTrans");
  343. if ((cnfgA & CNFGA_ID_MASK) != CNFGA_ID_8)
  344. printk(",%d byte%s left", cnfgA & CNFGA_PWORDLEFT,
  345. ((cnfgA & CNFGA_PWORDLEFT) > 1) ? "s" : "");
  346. printk("\n");
  347. printk(KERN_DEBUG PPIP32 " cnfgB=0x%02x", cnfgB);
  348. printk(" irq=%u,dma=%u",
  349. (cnfgB & CNFGB_IRQ_MASK) >> CNFGB_IRQ_SHIFT,
  350. (cnfgB & CNFGB_DMA_MASK) >> CNFGB_DMA_SHIFT);
  351. printk(",intrValue=%d", !!(cnfgB & CNFGB_INTRVAL));
  352. if (cnfgB & CNFGB_COMPRESS)
  353. printk(",compress");
  354. printk("\n");
  355. }
  356. for (i = 0; i < 2; i++) {
  357. unsigned int dcr = i ? priv->dcr_cache : readb(priv->regs.dcr);
  358. printk(KERN_DEBUG PPIP32 " dcr(%s)=0x%02x",
  359. i ? "soft" : "hard", dcr);
  360. printk(" %s", (dcr & DCR_DIR) ? "rev" : "fwd");
  361. if (dcr & DCR_IRQ)
  362. printk(",ackIntEn");
  363. if (!(dcr & DCR_SELECT))
  364. printk(",nSelectIn");
  365. if (dcr & DCR_nINIT)
  366. printk(",nInit");
  367. if (!(dcr & DCR_AUTOFD))
  368. printk(",nAutoFD");
  369. if (!(dcr & DCR_STROBE))
  370. printk(",nStrobe");
  371. printk("\n");
  372. }
  373. #define sep (f++ ? ',' : ' ')
  374. {
  375. unsigned int f = 0;
  376. unsigned int dsr = readb(priv->regs.dsr);
  377. printk(KERN_DEBUG PPIP32 " dsr=0x%02x", dsr);
  378. if (!(dsr & DSR_nBUSY))
  379. printk("%cBusy", sep);
  380. if (dsr & DSR_nACK)
  381. printk("%cnAck", sep);
  382. if (dsr & DSR_PERROR)
  383. printk("%cPError", sep);
  384. if (dsr & DSR_SELECT)
  385. printk("%cSelect", sep);
  386. if (dsr & DSR_nFAULT)
  387. printk("%cnFault", sep);
  388. if (!(dsr & DSR_nPRINT))
  389. printk("%c(Print)", sep);
  390. if (dsr & DSR_TIMEOUT)
  391. printk("%cTimeout", sep);
  392. printk("\n");
  393. }
  394. #undef sep
  395. }
  396. #else /* DEBUG_PARPORT_IP32 < 2 */
  397. #define parport_ip32_dump_state(...) do { } while (0)
  398. #endif
  399. /*
  400. * CHECK_EXTRA_BITS - track and log extra bits
  401. * @p: pointer to &struct parport
  402. * @b: byte to inspect
  403. * @m: bit mask of authorized bits
  404. *
  405. * This is used to track and log extra bits that should not be there in
  406. * parport_ip32_write_control() and parport_ip32_frob_control(). It is only
  407. * defined if %DEBUG_PARPORT_IP32 >= 1.
  408. */
  409. #if DEBUG_PARPORT_IP32 >= 1
  410. #define CHECK_EXTRA_BITS(p, b, m) \
  411. do { \
  412. unsigned int __b = (b), __m = (m); \
  413. if (__b & ~__m) \
  414. pr_debug1(PPIP32 "%s: extra bits in %s(%s): " \
  415. "0x%02x/0x%02x\n", \
  416. (p)->name, __func__, #b, __b, __m); \
  417. } while (0)
  418. #else /* DEBUG_PARPORT_IP32 < 1 */
  419. #define CHECK_EXTRA_BITS(...) do { } while (0)
  420. #endif
  421. /*--- IP32 parallel port DMA operations --------------------------------*/
  422. /**
  423. * struct parport_ip32_dma_data - private data needed for DMA operation
  424. * @dir: DMA direction (from or to device)
  425. * @buf: buffer physical address
  426. * @len: buffer length
  427. * @next: address of next bytes to DMA transfer
  428. * @left: number of bytes remaining
  429. * @ctx: next context to write (0: context_a; 1: context_b)
  430. * @irq_on: are the DMA IRQs currently enabled?
  431. * @lock: spinlock to protect access to the structure
  432. */
  433. struct parport_ip32_dma_data {
  434. enum dma_data_direction dir;
  435. dma_addr_t buf;
  436. dma_addr_t next;
  437. size_t len;
  438. size_t left;
  439. unsigned int ctx;
  440. unsigned int irq_on;
  441. spinlock_t lock;
  442. };
  443. static struct parport_ip32_dma_data parport_ip32_dma;
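/*
 * Descriptive note on the DMA scheme used below: a transfer is cut into
 * chunks that never cross a MACEPAR_CONTEXT_DATA_BOUND boundary, and the
 * chunks are written alternately into the two hardware context registers
 * (context_a, context_b, context_a, ...). Each context holds the chunk base
 * address and its length minus one; the last chunk is tagged with
 * MACEPAR_CONTEXT_LASTFLAG. When the hardware completes a context it raises
 * an interrupt, and parport_ip32_dma_interrupt() refills a context with the
 * next chunk through parport_ip32_dma_setup_context().
 */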
  444. /**
  445. * parport_ip32_dma_setup_context - setup next DMA context
  446. * @limit: maximum data size for the context
  447. *
  448. * The alignment constraints must be verified by the calling function, and the
  449. * parameter @limit must be set accordingly.
  450. */
  451. static void parport_ip32_dma_setup_context(unsigned int limit)
  452. {
  453. unsigned long flags;
  454. spin_lock_irqsave(&parport_ip32_dma.lock, flags);
  455. if (parport_ip32_dma.left > 0) {
  456. /* Note: ctxreg is "volatile" here only because
  457. * mace->perif.ctrl.parport.context_a and context_b are
  458. * "volatile". */
  459. volatile u64 __iomem *ctxreg = (parport_ip32_dma.ctx == 0) ?
  460. &mace->perif.ctrl.parport.context_a :
  461. &mace->perif.ctrl.parport.context_b;
  462. u64 count;
  463. u64 ctxval;
  464. if (parport_ip32_dma.left <= limit) {
  465. count = parport_ip32_dma.left;
  466. ctxval = MACEPAR_CONTEXT_LASTFLAG;
  467. } else {
  468. count = limit;
  469. ctxval = 0;
  470. }
  471. pr_trace(NULL,
  472. "(%u): 0x%04x:0x%04x, %u -> %u%s",
  473. limit,
  474. (unsigned int)parport_ip32_dma.buf,
  475. (unsigned int)parport_ip32_dma.next,
  476. (unsigned int)count,
  477. parport_ip32_dma.ctx, ctxval ? "*" : "");
  478. ctxval |= parport_ip32_dma.next &
  479. MACEPAR_CONTEXT_BASEADDR_MASK;
  480. ctxval |= ((count - 1) << MACEPAR_CONTEXT_DATALEN_SHIFT) &
  481. MACEPAR_CONTEXT_DATALEN_MASK;
  482. writeq(ctxval, ctxreg);
  483. parport_ip32_dma.next += count;
  484. parport_ip32_dma.left -= count;
  485. parport_ip32_dma.ctx ^= 1U;
  486. }
  487. /* If there is nothing more to send, disable IRQs to avoid an
  488. * IRQ storm which can lock up the machine. Disable them
  489. * only once. */
  490. if (parport_ip32_dma.left == 0 && parport_ip32_dma.irq_on) {
  491. pr_debug(PPIP32 "IRQ off (ctx)\n");
  492. disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
  493. disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
  494. parport_ip32_dma.irq_on = 0;
  495. }
  496. spin_unlock_irqrestore(&parport_ip32_dma.lock, flags);
  497. }
  498. /**
  499. * parport_ip32_dma_interrupt - DMA interrupt handler
  500. * @irq: interrupt number
  501. * @dev_id: unused
  502. */
  503. static irqreturn_t parport_ip32_dma_interrupt(int irq, void *dev_id)
  504. {
  505. if (parport_ip32_dma.left)
  506. pr_trace(NULL, "(%d): ctx=%d", irq, parport_ip32_dma.ctx);
  507. parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
  508. return IRQ_HANDLED;
  509. }
  510. #if DEBUG_PARPORT_IP32
  511. static irqreturn_t parport_ip32_merr_interrupt(int irq, void *dev_id)
  512. {
  513. pr_trace1(NULL, "(%d)", irq);
  514. return IRQ_HANDLED;
  515. }
  516. #endif
  517. /**
  518. * parport_ip32_dma_start - begins a DMA transfer
  519. * @p: parport to work on
  520. * @dir: DMA direction: DMA_TO_DEVICE or DMA_FROM_DEVICE
  521. * @addr: pointer to data buffer
  522. * @count: buffer size
  523. *
  524. * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
  525. * correctly balanced.
  526. */
  527. static int parport_ip32_dma_start(struct parport *p,
  528. enum dma_data_direction dir, void *addr, size_t count)
  529. {
  530. unsigned int limit;
  531. u64 ctrl;
  532. pr_trace(NULL, "(%d, %lu)", dir, (unsigned long)count);
  533. /* FIXME - add support for DMA_FROM_DEVICE. In this case, the buffer must
  534. * be 64-byte aligned. */
  535. BUG_ON(dir != DMA_TO_DEVICE);
  536. /* Reset DMA controller */
  537. ctrl = MACEPAR_CTLSTAT_RESET;
  538. writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
  539. /* DMA IRQs should normally be enabled */
  540. if (!parport_ip32_dma.irq_on) {
  541. WARN_ON(1);
  542. enable_irq(MACEISA_PAR_CTXA_IRQ);
  543. enable_irq(MACEISA_PAR_CTXB_IRQ);
  544. parport_ip32_dma.irq_on = 1;
  545. }
  546. /* Prepare DMA pointers */
  547. parport_ip32_dma.dir = dir;
  548. parport_ip32_dma.buf = dma_map_single(&p->bus_dev, addr, count, dir);
  549. parport_ip32_dma.len = count;
  550. parport_ip32_dma.next = parport_ip32_dma.buf;
  551. parport_ip32_dma.left = parport_ip32_dma.len;
  552. parport_ip32_dma.ctx = 0;
  553. /* Setup DMA direction and first two contexts */
  554. ctrl = (dir == DMA_TO_DEVICE) ? 0 : MACEPAR_CTLSTAT_DIRECTION;
  555. writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
  556. /* Single transfer should not cross a 4K page boundary */
  557. limit = MACEPAR_CONTEXT_DATA_BOUND -
  558. (parport_ip32_dma.next & (MACEPAR_CONTEXT_DATA_BOUND - 1));
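/* Worked example (assuming MACEPAR_CONTEXT_DATA_BOUND is the 4096-byte
 * bound mentioned above): if next ends in 0xf80, then
 * limit = 4096 - 0xf80 = 128, so the first context stops exactly at the
 * page boundary and the following contexts start page-aligned. */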
  559. parport_ip32_dma_setup_context(limit);
  560. parport_ip32_dma_setup_context(MACEPAR_CONTEXT_DATA_BOUND);
  561. /* Real start of DMA transfer */
  562. ctrl |= MACEPAR_CTLSTAT_ENABLE;
  563. writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
  564. return 0;
  565. }
  566. /**
  567. * parport_ip32_dma_stop - ends a running DMA transfer
  568. * @p: parport to work on
  569. *
  570. * Calls to parport_ip32_dma_start() and parport_ip32_dma_stop() must be
  571. * correctly balanced.
  572. */
  573. static void parport_ip32_dma_stop(struct parport *p)
  574. {
  575. u64 ctx_a;
  576. u64 ctx_b;
  577. u64 ctrl;
  578. u64 diag;
  579. size_t res[2]; /* {[0] = res_a, [1] = res_b} */
  580. pr_trace(NULL, "()");
  581. /* Disable IRQs */
  582. spin_lock_irq(&parport_ip32_dma.lock);
  583. if (parport_ip32_dma.irq_on) {
  584. pr_debug(PPIP32 "IRQ off (stop)\n");
  585. disable_irq_nosync(MACEISA_PAR_CTXA_IRQ);
  586. disable_irq_nosync(MACEISA_PAR_CTXB_IRQ);
  587. parport_ip32_dma.irq_on = 0;
  588. }
  589. spin_unlock_irq(&parport_ip32_dma.lock);
  590. /* Force IRQ synchronization, even if the IRQs were disabled
  591. * elsewhere. */
  592. synchronize_irq(MACEISA_PAR_CTXA_IRQ);
  593. synchronize_irq(MACEISA_PAR_CTXB_IRQ);
  594. /* Stop DMA transfer */
  595. ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
  596. ctrl &= ~MACEPAR_CTLSTAT_ENABLE;
  597. writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
  598. /* Adjust residue (parport_ip32_dma.left) */
  599. ctx_a = readq(&mace->perif.ctrl.parport.context_a);
  600. ctx_b = readq(&mace->perif.ctrl.parport.context_b);
  601. ctrl = readq(&mace->perif.ctrl.parport.cntlstat);
  602. diag = readq(&mace->perif.ctrl.parport.diagnostic);
  603. res[0] = (ctrl & MACEPAR_CTLSTAT_CTXA_VALID) ?
  604. 1 + ((ctx_a & MACEPAR_CONTEXT_DATALEN_MASK) >>
  605. MACEPAR_CONTEXT_DATALEN_SHIFT) :
  606. 0;
  607. res[1] = (ctrl & MACEPAR_CTLSTAT_CTXB_VALID) ?
  608. 1 + ((ctx_b & MACEPAR_CONTEXT_DATALEN_MASK) >>
  609. MACEPAR_CONTEXT_DATALEN_SHIFT) :
  610. 0;
  611. if (diag & MACEPAR_DIAG_DMACTIVE)
  612. res[(diag & MACEPAR_DIAG_CTXINUSE) != 0] =
  613. 1 + ((diag & MACEPAR_DIAG_CTRMASK) >>
  614. MACEPAR_DIAG_CTRSHIFT);
  615. parport_ip32_dma.left += res[0] + res[1];
  616. /* Reset DMA controller, and re-enable IRQs */
  617. ctrl = MACEPAR_CTLSTAT_RESET;
  618. writeq(ctrl, &mace->perif.ctrl.parport.cntlstat);
  619. pr_debug(PPIP32 "IRQ on (stop)\n");
  620. enable_irq(MACEISA_PAR_CTXA_IRQ);
  621. enable_irq(MACEISA_PAR_CTXB_IRQ);
  622. parport_ip32_dma.irq_on = 1;
  623. dma_unmap_single(&p->bus_dev, parport_ip32_dma.buf,
  624. parport_ip32_dma.len, parport_ip32_dma.dir);
  625. }
  626. /**
  627. * parport_ip32_dma_get_residue - get residue from last DMA transfer
  628. *
  629. * Returns the number of bytes remaining from last DMA transfer.
  630. */
  631. static inline size_t parport_ip32_dma_get_residue(void)
  632. {
  633. return parport_ip32_dma.left;
  634. }
  635. /**
  636. * parport_ip32_dma_register - initialize DMA engine
  637. *
  638. * Returns zero for success.
  639. */
  640. static int parport_ip32_dma_register(void)
  641. {
  642. int err;
  643. spin_lock_init(&parport_ip32_dma.lock);
  644. parport_ip32_dma.irq_on = 1;
  645. /* Reset DMA controller */
  646. writeq(MACEPAR_CTLSTAT_RESET, &mace->perif.ctrl.parport.cntlstat);
  647. /* Request IRQs */
  648. err = request_irq(MACEISA_PAR_CTXA_IRQ, parport_ip32_dma_interrupt,
  649. 0, "parport_ip32", NULL);
  650. if (err)
  651. goto fail_a;
  652. err = request_irq(MACEISA_PAR_CTXB_IRQ, parport_ip32_dma_interrupt,
  653. 0, "parport_ip32", NULL);
  654. if (err)
  655. goto fail_b;
  656. #if DEBUG_PARPORT_IP32
  657. /* FIXME - what is this IRQ for? */
  658. err = request_irq(MACEISA_PAR_MERR_IRQ, parport_ip32_merr_interrupt,
  659. 0, "parport_ip32", NULL);
  660. if (err)
  661. goto fail_merr;
  662. #endif
  663. return 0;
  664. #if DEBUG_PARPORT_IP32
  665. fail_merr:
  666. free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
  667. #endif
  668. fail_b:
  669. free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
  670. fail_a:
  671. return err;
  672. }
  673. /**
  674. * parport_ip32_dma_unregister - release and free resources for DMA engine
  675. */
  676. static void parport_ip32_dma_unregister(void)
  677. {
  678. #if DEBUG_PARPORT_IP32
  679. free_irq(MACEISA_PAR_MERR_IRQ, NULL);
  680. #endif
  681. free_irq(MACEISA_PAR_CTXB_IRQ, NULL);
  682. free_irq(MACEISA_PAR_CTXA_IRQ, NULL);
  683. }
  684. /*--- Interrupt handlers and associates --------------------------------*/
  685. /**
  686. * parport_ip32_wakeup - wakes up code waiting for an interrupt
  687. * @p: pointer to &struct parport
  688. */
  689. static inline void parport_ip32_wakeup(struct parport *p)
  690. {
  691. struct parport_ip32_private * const priv = p->physport->private_data;
  692. complete(&priv->irq_complete);
  693. }
  694. /**
  695. * parport_ip32_interrupt - interrupt handler
  696. * @irq: interrupt number
  697. * @dev_id: pointer to &struct parport
  698. *
  699. * Caught interrupts are forwarded to the upper parport layer if @irq_mode is
  700. * %PARPORT_IP32_IRQ_FWD.
  701. */
  702. static irqreturn_t parport_ip32_interrupt(int irq, void *dev_id)
  703. {
  704. struct parport * const p = dev_id;
  705. struct parport_ip32_private * const priv = p->physport->private_data;
  706. enum parport_ip32_irq_mode irq_mode = priv->irq_mode;
  707. switch (irq_mode) {
  708. case PARPORT_IP32_IRQ_FWD:
  709. return parport_irq_handler(irq, dev_id);
  710. case PARPORT_IP32_IRQ_HERE:
  711. parport_ip32_wakeup(p);
  712. break;
  713. }
  714. return IRQ_HANDLED;
  715. }
  716. /*--- Some utility function to manipulate ECR register -----------------*/
  717. /**
  718. * parport_ip32_read_econtrol - read contents of the ECR register
  719. * @p: pointer to &struct parport
  720. */
  721. static inline unsigned int parport_ip32_read_econtrol(struct parport *p)
  722. {
  723. struct parport_ip32_private * const priv = p->physport->private_data;
  724. return readb(priv->regs.ecr);
  725. }
  726. /**
  727. * parport_ip32_write_econtrol - write new contents to the ECR register
  728. * @p: pointer to &struct parport
  729. * @c: new value to write
  730. */
  731. static inline void parport_ip32_write_econtrol(struct parport *p,
  732. unsigned int c)
  733. {
  734. struct parport_ip32_private * const priv = p->physport->private_data;
  735. writeb(c, priv->regs.ecr);
  736. }
  737. /**
  738. * parport_ip32_frob_econtrol - change bits from the ECR register
  739. * @p: pointer to &struct parport
  740. * @mask: bit mask of bits to change
  741. * @val: new value for changed bits
  742. *
  743. * Read from the ECR, mask out the bits in @mask, exclusive-or with the bits
  744. * in @val, and write the result to the ECR.
  745. */
  746. static inline void parport_ip32_frob_econtrol(struct parport *p,
  747. unsigned int mask,
  748. unsigned int val)
  749. {
  750. unsigned int c;
  751. c = (parport_ip32_read_econtrol(p) & ~mask) ^ val;
  752. parport_ip32_write_econtrol(p, c);
  753. }
  754. /**
  755. * parport_ip32_set_mode - change mode of ECP port
  756. * @p: pointer to &struct parport
  757. * @mode: new mode to write in ECR
  758. *
  759. * ECR is reset in a sane state (interrupts and DMA disabled), and placed in
  760. * mode @mode. Go through PS2 mode if needed.
  761. */
  762. static void parport_ip32_set_mode(struct parport *p, unsigned int mode)
  763. {
  764. unsigned int omode;
  765. mode &= ECR_MODE_MASK;
  766. omode = parport_ip32_read_econtrol(p) & ECR_MODE_MASK;
  767. if (!(mode == ECR_MODE_SPP || mode == ECR_MODE_PS2
  768. || omode == ECR_MODE_SPP || omode == ECR_MODE_PS2)) {
  769. /* We have to go through PS2 mode */
  770. unsigned int ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
  771. parport_ip32_write_econtrol(p, ecr);
  772. }
  773. parport_ip32_write_econtrol(p, mode | ECR_nERRINTR | ECR_SERVINTR);
  774. }
  775. /*--- Basic functions needed for parport -------------------------------*/
  776. /**
  777. * parport_ip32_read_data - return current contents of the DATA register
  778. * @p: pointer to &struct parport
  779. */
  780. static inline unsigned char parport_ip32_read_data(struct parport *p)
  781. {
  782. struct parport_ip32_private * const priv = p->physport->private_data;
  783. return readb(priv->regs.data);
  784. }
  785. /**
  786. * parport_ip32_write_data - set new contents for the DATA register
  787. * @p: pointer to &struct parport
  788. * @d: new value to write
  789. */
  790. static inline void parport_ip32_write_data(struct parport *p, unsigned char d)
  791. {
  792. struct parport_ip32_private * const priv = p->physport->private_data;
  793. writeb(d, priv->regs.data);
  794. }
  795. /**
  796. * parport_ip32_read_status - return current contents of the DSR register
  797. * @p: pointer to &struct parport
  798. */
  799. static inline unsigned char parport_ip32_read_status(struct parport *p)
  800. {
  801. struct parport_ip32_private * const priv = p->physport->private_data;
  802. return readb(priv->regs.dsr);
  803. }
  804. /**
  805. * __parport_ip32_read_control - return cached contents of the DCR register
  806. * @p: pointer to &struct parport
  807. */
  808. static inline unsigned int __parport_ip32_read_control(struct parport *p)
  809. {
  810. struct parport_ip32_private * const priv = p->physport->private_data;
  811. return priv->dcr_cache; /* use soft copy */
  812. }
  813. /**
  814. * __parport_ip32_write_control - set new contents for the DCR register
  815. * @p: pointer to &struct parport
  816. * @c: new value to write
  817. */
  818. static inline void __parport_ip32_write_control(struct parport *p,
  819. unsigned int c)
  820. {
  821. struct parport_ip32_private * const priv = p->physport->private_data;
  822. CHECK_EXTRA_BITS(p, c, priv->dcr_writable);
  823. c &= priv->dcr_writable; /* only writable bits */
  824. writeb(c, priv->regs.dcr);
  825. priv->dcr_cache = c; /* update soft copy */
  826. }
  827. /**
  828. * __parport_ip32_frob_control - change bits from the DCR register
  829. * @p: pointer to &struct parport
  830. * @mask: bit mask of bits to change
  831. * @val: new value for changed bits
  832. *
  833. * This is equivalent to reading from the DCR, masking out the bits in @mask,
  834. * exclusive-oring with the bits in @val, and writing the result to the DCR.
  835. * Actually, the cached contents of the DCR are used.
  836. */
  837. static inline void __parport_ip32_frob_control(struct parport *p,
  838. unsigned int mask,
  839. unsigned int val)
  840. {
  841. unsigned int c;
  842. c = (__parport_ip32_read_control(p) & ~mask) ^ val;
  843. __parport_ip32_write_control(p, c);
  844. }
  845. /**
  846. * parport_ip32_read_control - return cached contents of the DCR register
  847. * @p: pointer to &struct parport
  848. *
  849. * The return value is masked so as to only return the value of %DCR_STROBE,
  850. * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
  851. */
  852. static inline unsigned char parport_ip32_read_control(struct parport *p)
  853. {
  854. const unsigned int rm =
  855. DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
  856. return __parport_ip32_read_control(p) & rm;
  857. }
  858. /**
  859. * parport_ip32_write_control - set new contents for the DCR register
  860. * @p: pointer to &struct parport
  861. * @c: new value to write
  862. *
  863. * The value is masked so as to only change the value of %DCR_STROBE,
  864. * %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
  865. */
  866. static inline void parport_ip32_write_control(struct parport *p,
  867. unsigned char c)
  868. {
  869. const unsigned int wm =
  870. DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
  871. CHECK_EXTRA_BITS(p, c, wm);
  872. __parport_ip32_frob_control(p, wm, c & wm);
  873. }
  874. /**
  875. * parport_ip32_frob_control - change bits from the DCR register
  876. * @p: pointer to &struct parport
  877. * @mask: bit mask of bits to change
  878. * @val: new value for changed bits
  879. *
  880. * This differs from __parport_ip32_frob_control() in that it only allows
  881. * changing the value of %DCR_STROBE, %DCR_AUTOFD, %DCR_nINIT, and %DCR_SELECT.
  882. */
  883. static inline unsigned char parport_ip32_frob_control(struct parport *p,
  884. unsigned char mask,
  885. unsigned char val)
  886. {
  887. const unsigned int wm =
  888. DCR_STROBE | DCR_AUTOFD | DCR_nINIT | DCR_SELECT;
  889. CHECK_EXTRA_BITS(p, mask, wm);
  890. CHECK_EXTRA_BITS(p, val, wm);
  891. __parport_ip32_frob_control(p, mask & wm, val & wm);
  892. return parport_ip32_read_control(p);
  893. }
  894. /**
  895. * parport_ip32_disable_irq - disable interrupts on the rising edge of nACK
  896. * @p: pointer to &struct parport
  897. */
  898. static inline void parport_ip32_disable_irq(struct parport *p)
  899. {
  900. __parport_ip32_frob_control(p, DCR_IRQ, 0);
  901. }
  902. /**
  903. * parport_ip32_enable_irq - enable interrupts on the rising edge of nACK
  904. * @p: pointer to &struct parport
  905. */
  906. static inline void parport_ip32_enable_irq(struct parport *p)
  907. {
  908. __parport_ip32_frob_control(p, DCR_IRQ, DCR_IRQ);
  909. }
  910. /**
  911. * parport_ip32_data_forward - enable host-to-peripheral communications
  912. * @p: pointer to &struct parport
  913. *
  914. * Enable the data line drivers, for 8-bit host-to-peripheral communications.
  915. */
  916. static inline void parport_ip32_data_forward(struct parport *p)
  917. {
  918. __parport_ip32_frob_control(p, DCR_DIR, 0);
  919. }
  920. /**
  921. * parport_ip32_data_reverse - enable peripheral-to-host communications
  922. * @p: pointer to &struct parport
  923. *
  924. * Place the data bus in a high impedance state, if @p->modes has the
  925. * PARPORT_MODE_TRISTATE bit set.
  926. */
  927. static inline void parport_ip32_data_reverse(struct parport *p)
  928. {
  929. __parport_ip32_frob_control(p, DCR_DIR, DCR_DIR);
  930. }
  931. /**
  932. * parport_ip32_init_state - for core parport code
  933. * @dev: pointer to &struct pardevice
  934. * @s: pointer to &struct parport_state to initialize
  935. */
  936. static void parport_ip32_init_state(struct pardevice *dev,
  937. struct parport_state *s)
  938. {
  939. s->u.ip32.dcr = DCR_SELECT | DCR_nINIT;
  940. s->u.ip32.ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
  941. }
  942. /**
  943. * parport_ip32_save_state - for core parport code
  944. * @p: pointer to &struct parport
  945. * @s: pointer to &struct parport_state to save state to
  946. */
  947. static void parport_ip32_save_state(struct parport *p,
  948. struct parport_state *s)
  949. {
  950. s->u.ip32.dcr = __parport_ip32_read_control(p);
  951. s->u.ip32.ecr = parport_ip32_read_econtrol(p);
  952. }
  953. /**
  954. * parport_ip32_restore_state - for core parport code
  955. * @p: pointer to &struct parport
  956. * @s: pointer to &struct parport_state to restore state from
  957. */
  958. static void parport_ip32_restore_state(struct parport *p,
  959. struct parport_state *s)
  960. {
  961. parport_ip32_set_mode(p, s->u.ip32.ecr & ECR_MODE_MASK);
  962. parport_ip32_write_econtrol(p, s->u.ip32.ecr);
  963. __parport_ip32_write_control(p, s->u.ip32.dcr);
  964. }
  965. /*--- EPP mode functions -----------------------------------------------*/
  966. /**
  967. * parport_ip32_clear_epp_timeout - clear Timeout bit in EPP mode
  968. * @p: pointer to &struct parport
  969. *
  970. * Returns 1 if the Timeout bit is clear, and 0 otherwise.
  971. */
  972. static unsigned int parport_ip32_clear_epp_timeout(struct parport *p)
  973. {
  974. struct parport_ip32_private * const priv = p->physport->private_data;
  975. unsigned int cleared;
  976. if (!(parport_ip32_read_status(p) & DSR_TIMEOUT))
  977. cleared = 1;
  978. else {
  979. unsigned int r;
  980. /* To clear timeout some chips require double read */
  981. parport_ip32_read_status(p);
  982. r = parport_ip32_read_status(p);
  983. /* Some reset by writing 1 */
  984. writeb(r | DSR_TIMEOUT, priv->regs.dsr);
  985. /* Others by writing 0 */
  986. writeb(r & ~DSR_TIMEOUT, priv->regs.dsr);
  987. r = parport_ip32_read_status(p);
  988. cleared = !(r & DSR_TIMEOUT);
  989. }
  990. pr_trace(p, "(): %s", cleared ? "cleared" : "failed");
  991. return cleared;
  992. }
  993. /**
  994. * parport_ip32_epp_read - generic EPP read function
  995. * @eppreg: I/O register to read from
  996. * @p: pointer to &struct parport
  997. * @buf: buffer to store read data
  998. * @len: length of buffer @buf
  999. * @flags: may be PARPORT_EPP_FAST
  1000. */
  1001. static size_t parport_ip32_epp_read(void __iomem *eppreg,
  1002. struct parport *p, void *buf,
  1003. size_t len, int flags)
  1004. {
  1005. struct parport_ip32_private * const priv = p->physport->private_data;
  1006. size_t got;
  1007. parport_ip32_set_mode(p, ECR_MODE_EPP);
  1008. parport_ip32_data_reverse(p);
  1009. parport_ip32_write_control(p, DCR_nINIT);
  1010. if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
  1011. readsb(eppreg, buf, len);
  1012. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1013. parport_ip32_clear_epp_timeout(p);
  1014. return -EIO;
  1015. }
  1016. got = len;
  1017. } else {
  1018. u8 *bufp = buf;
  1019. for (got = 0; got < len; got++) {
  1020. *bufp++ = readb(eppreg);
  1021. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1022. parport_ip32_clear_epp_timeout(p);
  1023. break;
  1024. }
  1025. }
  1026. }
  1027. parport_ip32_data_forward(p);
  1028. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1029. return got;
  1030. }
  1031. /**
  1032. * parport_ip32_epp_write - generic EPP write function
  1033. * @eppreg: I/O register to write to
  1034. * @p: pointer to &struct parport
  1035. * @buf: buffer of data to write
  1036. * @len: length of buffer @buf
  1037. * @flags: may be PARPORT_EPP_FAST
  1038. */
  1039. static size_t parport_ip32_epp_write(void __iomem *eppreg,
  1040. struct parport *p, const void *buf,
  1041. size_t len, int flags)
  1042. {
  1043. struct parport_ip32_private * const priv = p->physport->private_data;
  1044. size_t written;
  1045. parport_ip32_set_mode(p, ECR_MODE_EPP);
  1046. parport_ip32_data_forward(p);
  1047. parport_ip32_write_control(p, DCR_nINIT);
  1048. if ((flags & PARPORT_EPP_FAST) && (len > 1)) {
  1049. writesb(eppreg, buf, len);
  1050. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1051. parport_ip32_clear_epp_timeout(p);
  1052. return -EIO;
  1053. }
  1054. written = len;
  1055. } else {
  1056. const u8 *bufp = buf;
  1057. for (written = 0; written < len; written++) {
  1058. writeb(*bufp++, eppreg);
  1059. if (readb(priv->regs.dsr) & DSR_TIMEOUT) {
  1060. parport_ip32_clear_epp_timeout(p);
  1061. break;
  1062. }
  1063. }
  1064. }
  1065. parport_ip32_set_mode(p, ECR_MODE_PS2);
  1066. return written;
  1067. }
  1068. /**
  1069. * parport_ip32_epp_read_data - read a block of data in EPP mode
  1070. * @p: pointer to &struct parport
  1071. * @buf: buffer to store read data
  1072. * @len: length of buffer @buf
  1073. * @flags: may be PARPORT_EPP_FAST
  1074. */
  1075. static size_t parport_ip32_epp_read_data(struct parport *p, void *buf,
  1076. size_t len, int flags)
  1077. {
  1078. struct parport_ip32_private * const priv = p->physport->private_data;
  1079. return parport_ip32_epp_read(priv->regs.eppData0, p, buf, len, flags);
  1080. }
  1081. /**
  1082. * parport_ip32_epp_write_data - write a block of data in EPP mode
  1083. * @p: pointer to &struct parport
  1084. * @buf: buffer of data to write
  1085. * @len: length of buffer @buf
  1086. * @flags: may be PARPORT_EPP_FAST
  1087. */
  1088. static size_t parport_ip32_epp_write_data(struct parport *p, const void *buf,
  1089. size_t len, int flags)
  1090. {
  1091. struct parport_ip32_private * const priv = p->physport->private_data;
  1092. return parport_ip32_epp_write(priv->regs.eppData0, p, buf, len, flags);
  1093. }
  1094. /**
  1095. * parport_ip32_epp_read_addr - read a block of addresses in EPP mode
  1096. * @p: pointer to &struct parport
  1097. * @buf: buffer to store read data
  1098. * @len: length of buffer @buf
  1099. * @flags: may be PARPORT_EPP_FAST
  1100. */
  1101. static size_t parport_ip32_epp_read_addr(struct parport *p, void *buf,
  1102. size_t len, int flags)
  1103. {
  1104. struct parport_ip32_private * const priv = p->physport->private_data;
  1105. return parport_ip32_epp_read(priv->regs.eppAddr, p, buf, len, flags);
  1106. }
  1107. /**
  1108. * parport_ip32_epp_write_addr - write a block of addresses in EPP mode
  1109. * @p: pointer to &struct parport
  1110. * @buf: buffer of data to write
  1111. * @len: length of buffer @buf
  1112. * @flags: may be PARPORT_EPP_FAST
  1113. */
  1114. static size_t parport_ip32_epp_write_addr(struct parport *p, const void *buf,
  1115. size_t len, int flags)
  1116. {
  1117. struct parport_ip32_private * const priv = p->physport->private_data;
  1118. return parport_ip32_epp_write(priv->regs.eppAddr, p, buf, len, flags);
  1119. }
  1120. /*--- ECP mode functions (FIFO) ----------------------------------------*/
  1121. /**
  1122. * parport_ip32_fifo_wait_break - check if the waiting function should return
  1123. * @p: pointer to &struct parport
  1124. * @expire: timeout expiring date, in jiffies
  1125. *
  1126. * parport_ip32_fifo_wait_break() checks if the waiting function should return
  1127. * immediately or not. The break conditions are:
  1128. * - expired timeout;
  1129. * - a pending signal;
  1130. * - nFault asserted low.
  1131. * This function also calls cond_resched().
  1132. */
  1133. static unsigned int parport_ip32_fifo_wait_break(struct parport *p,
  1134. unsigned long expire)
  1135. {
  1136. cond_resched();
  1137. if (time_after(jiffies, expire)) {
  1138. pr_debug1(PPIP32 "%s: FIFO write timed out\n", p->name);
  1139. return 1;
  1140. }
  1141. if (signal_pending(current)) {
  1142. pr_debug1(PPIP32 "%s: Signal pending\n", p->name);
  1143. return 1;
  1144. }
  1145. if (!(parport_ip32_read_status(p) & DSR_nFAULT)) {
  1146. pr_debug1(PPIP32 "%s: nFault asserted low\n", p->name);
  1147. return 1;
  1148. }
  1149. return 0;
  1150. }
  1151. /**
  1152. * parport_ip32_fwp_wait_polling - wait for FIFO to empty (polling)
  1153. * @p: pointer to &struct parport
  1154. *
  1155. * Returns the number of bytes that can safely be written in the FIFO. A
  1156. * return value of zero means that the calling function should terminate as
  1157. * fast as possible.
  1158. */
  1159. static unsigned int parport_ip32_fwp_wait_polling(struct parport *p)
  1160. {
  1161. struct parport_ip32_private * const priv = p->physport->private_data;
  1162. struct parport * const physport = p->physport;
  1163. unsigned long expire;
  1164. unsigned int count;
  1165. unsigned int ecr;
  1166. expire = jiffies + physport->cad->timeout;
  1167. count = 0;
  1168. while (1) {
  1169. if (parport_ip32_fifo_wait_break(p, expire))
  1170. break;
  1171. /* Check FIFO state. We do nothing when the FIFO is neither full
  1172. * nor empty. It appears that the FIFO full bit is not always
  1173. * reliable: the FIFO state is sometimes wrongly reported, and
  1174. * the chip gets confused if we give it another byte. */
  1175. ecr = parport_ip32_read_econtrol(p);
  1176. if (ecr & ECR_F_EMPTY) {
  1177. /* FIFO is empty, fill it up */
  1178. count = priv->fifo_depth;
  1179. break;
  1180. }
  1181. /* Wait a moment... */
  1182. udelay(FIFO_POLLING_INTERVAL);
  1183. } /* while (1) */
  1184. return count;
  1185. }
  1186. /**
  1187. * parport_ip32_fwp_wait_interrupt - wait for FIFO to empty (interrupt-driven)
  1188. * @p: pointer to &struct parport
  1189. *
  1190. * Returns the number of bytes that can safely be written in the FIFO. A
  1191. * return value of zero means that the calling function should terminate as
  1192. * fast as possible.
  1193. */
  1194. static unsigned int parport_ip32_fwp_wait_interrupt(struct parport *p)
  1195. {
  1196. static unsigned int lost_interrupt = 0;
  1197. struct parport_ip32_private * const priv = p->physport->private_data;
  1198. struct parport * const physport = p->physport;
  1199. unsigned long nfault_timeout;
  1200. unsigned long expire;
  1201. unsigned int count;
  1202. unsigned int ecr;
  1203. nfault_timeout = min((unsigned long)physport->cad->timeout,
  1204. msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
  1205. expire = jiffies + physport->cad->timeout;
  1206. count = 0;
  1207. while (1) {
  1208. if (parport_ip32_fifo_wait_break(p, expire))
  1209. break;
  1210. /* Initialize mutex used to take interrupts into account */
  1211. reinit_completion(&priv->irq_complete);
  1212. /* Enable serviceIntr */
  1213. parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);
  1214. /* Enabling serviceIntr while the FIFO is empty does not
  1215. * always generate an interrupt, so check for emptiness
  1216. * now. */
  1217. ecr = parport_ip32_read_econtrol(p);
  1218. if (!(ecr & ECR_F_EMPTY)) {
  1219. /* FIFO is not empty: wait for an interrupt or a
  1220. * timeout to occur */
  1221. wait_for_completion_interruptible_timeout(
  1222. &priv->irq_complete, nfault_timeout);
  1223. ecr = parport_ip32_read_econtrol(p);
  1224. if ((ecr & ECR_F_EMPTY) && !(ecr & ECR_SERVINTR)
  1225. && !lost_interrupt) {
  1226. printk(KERN_WARNING PPIP32
  1227. "%s: lost interrupt in %s\n",
  1228. p->name, __func__);
  1229. lost_interrupt = 1;
  1230. }
  1231. }
  1232. /* Disable serviceIntr */
  1233. parport_ip32_frob_econtrol(p, ECR_SERVINTR, ECR_SERVINTR);
  1234. /* Check FIFO state */
  1235. if (ecr & ECR_F_EMPTY) {
  1236. /* FIFO is empty, fill it up */
  1237. count = priv->fifo_depth;
  1238. break;
  1239. } else if (ecr & ECR_SERVINTR) {
  1240. /* FIFO is not empty, but we know that we can safely push
  1241. * writeIntrThreshold bytes into it */
  1242. count = priv->writeIntrThreshold;
  1243. break;
  1244. }
  1245. /* FIFO is not empty, and we did not get any interrupt.
  1246. * Either it's time to check for nFault, or a signal is
  1247. * pending. This is verified in
  1248. * parport_ip32_fifo_wait_break(), so we continue the loop. */
  1249. } /* while (1) */
  1250. return count;
  1251. }

/**
 * parport_ip32_fifo_write_block_pio - write a block of data (PIO mode)
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses PIO to write the contents of the buffer @buf into the parallel port
 * FIFO.  Returns the number of bytes that were actually written.  It can
 * work with or without the help of interrupts.  The parallel port must be
 * correctly initialized before calling parport_ip32_fifo_write_block_pio().
 */
static size_t parport_ip32_fifo_write_block_pio(struct parport *p,
						const void *buf, size_t len)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	const u8 *bufp = buf;
	size_t left = len;

	priv->irq_mode = PARPORT_IP32_IRQ_HERE;

	while (left > 0) {
		unsigned int count;

		count = (p->irq == PARPORT_IRQ_NONE) ?
			parport_ip32_fwp_wait_polling(p) :
			parport_ip32_fwp_wait_interrupt(p);
		if (count == 0)
			break;	/* Transmission should be stopped */
		if (count > left)
			count = left;
		if (count == 1) {
			writeb(*bufp, priv->regs.fifo);
			bufp++, left--;
		} else {
			writesb(priv->regs.fifo, bufp, count);
			bufp += count, left -= count;
		}
	}

	priv->irq_mode = PARPORT_IP32_IRQ_FWD;

	return len - left;
}
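
/*
 * Note on writesb() above: it is the string variant of writeb(), pushing
 * count bytes from bufp to the single FIFO register, roughly equivalent to
 *
 *	while (count--)
 *		writeb(*bufp++, priv->regs.fifo);
 *
 * but typically cheaper than issuing the MMIO writes one call at a time.
 */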

/**
 * parport_ip32_fifo_write_block_dma - write a block of data (DMA mode)
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses DMA to write the contents of the buffer @buf into the parallel port
 * FIFO.  Returns the number of bytes that were actually written.  The
 * parallel port must be correctly initialized before calling
 * parport_ip32_fifo_write_block_dma().
 */
static size_t parport_ip32_fifo_write_block_dma(struct parport *p,
						const void *buf, size_t len)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	unsigned long nfault_timeout;
	unsigned long expire;
	size_t written;
	unsigned int ecr;

	priv->irq_mode = PARPORT_IP32_IRQ_HERE;

	parport_ip32_dma_start(p, DMA_TO_DEVICE, (void *)buf, len);
	reinit_completion(&priv->irq_complete);
	parport_ip32_frob_econtrol(p, ECR_DMAEN | ECR_SERVINTR, ECR_DMAEN);

	nfault_timeout = min((unsigned long)physport->cad->timeout,
			     msecs_to_jiffies(FIFO_NFAULT_TIMEOUT));
	expire = jiffies + physport->cad->timeout;
	while (1) {
		if (parport_ip32_fifo_wait_break(p, expire))
			break;
		wait_for_completion_interruptible_timeout(&priv->irq_complete,
							  nfault_timeout);
		ecr = parport_ip32_read_econtrol(p);
		if (ecr & ECR_SERVINTR)
			break;	/* DMA transfer just finished */
	}
	parport_ip32_dma_stop(p);
	written = len - parport_ip32_dma_get_residue();

	priv->irq_mode = PARPORT_IP32_IRQ_FWD;

	return written;
}

/**
 * parport_ip32_fifo_write_block - write a block of data
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 *
 * Uses PIO or DMA to write the contents of the buffer @buf into the parallel
 * port @p FIFO.  Returns the number of bytes that were actually written.
 */
static size_t parport_ip32_fifo_write_block(struct parport *p,
					    const void *buf, size_t len)
{
	size_t written = 0;

	if (len)
		/* FIXME - Maybe some threshold value should be set for @len
		 * under which we revert to PIO mode?  An illustrative sketch
		 * follows this function. */
		written = (p->modes & PARPORT_MODE_DMA) ?
			parport_ip32_fifo_write_block_dma(p, buf, len) :
			parport_ip32_fifo_write_block_pio(p, buf, len);

	return written;
}
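
/*
 * Illustrative sketch only, addressing the FIXME above: one possible policy
 * would be to fall back to PIO whenever the request fits in the FIFO anyway,
 * since the DMA setup cost is then unlikely to pay off.  The cutoff value is
 * an assumption, not something mandated by the hardware:
 *
 *	struct parport_ip32_private * const priv = p->physport->private_data;
 *
 *	if (len > priv->fifo_depth && (p->modes & PARPORT_MODE_DMA))
 *		written = parport_ip32_fifo_write_block_dma(p, buf, len);
 *	else if (len)
 *		written = parport_ip32_fifo_write_block_pio(p, buf, len);
 */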

/**
 * parport_ip32_drain_fifo - wait for FIFO to empty
 * @p: pointer to &struct parport
 * @timeout: timeout, in jiffies
 *
 * This function waits for the FIFO to empty.  It returns 1 when the FIFO is
 * empty, or 0 if the timeout @timeout is reached before then, or if a signal
 * is pending.
 */
static unsigned int parport_ip32_drain_fifo(struct parport *p,
					    unsigned long timeout)
{
	unsigned long expire = jiffies + timeout;
	unsigned int polling_interval;
	unsigned int counter;

	/* Busy wait for approx. 200us */
	for (counter = 0; counter < 40; counter++) {
		if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
			break;
		if (time_after(jiffies, expire))
			break;
		if (signal_pending(current))
			break;
		udelay(5);
	}
	/* Poll slowly.  The polling interval starts at 1 millisecond and is
	 * doubled on each iteration, up to a maximum of 128 ms. */
	polling_interval = 1; /* msecs */
	while (!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY)) {
		if (time_after_eq(jiffies, expire))
			break;
		msleep_interruptible(polling_interval);
		if (signal_pending(current))
			break;
		if (polling_interval < 128)
			polling_interval *= 2;
	}

	return !!(parport_ip32_read_econtrol(p) & ECR_F_EMPTY);
}
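
/*
 * With the exponential backoff above, the sleep intervals are 1, 2, 4, 8,
 * 16, 32, 64 and then a steady 128 ms; the first eight sleeps therefore add
 * up to roughly 255 ms before the poll settles at its maximum interval.  The
 * initial busy-wait loop covers the common case where the FIFO drains within
 * about 200 us (40 iterations of udelay(5)).
 */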

/**
 * parport_ip32_get_fifo_residue - reset FIFO
 * @p: pointer to &struct parport
 * @mode: current operation mode (ECR_MODE_PPF or ECR_MODE_ECP)
 *
 * This function resets the FIFO, and returns the number of bytes remaining
 * in it.
 */
static unsigned int parport_ip32_get_fifo_residue(struct parport *p,
						  unsigned int mode)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int residue;
	unsigned int cnfga;

	/* FIXME - We are missing one byte if the printer is off-line.  I
	 * don't know how to detect this.  It looks like the full bit is not
	 * always reliable.  For the moment, the problem is avoided in most
	 * cases by testing for BUSY in parport_ip32_compat_write_data().
	 */
	if (parport_ip32_read_econtrol(p) & ECR_F_EMPTY)
		residue = 0;
	else {
		pr_debug1(PPIP32 "%s: FIFO is stuck\n", p->name);

		/* Stop all transfers.
		 *
		 * Microsoft's document instructs us to drive DCR_STROBE to
		 * 0, but it doesn't work (at least in Compatibility mode,
		 * not tested in ECP mode).  Switching directly to Test mode
		 * (as in parport_pc) is not an option: it confuses the port,
		 * and ECP service interrupts no longer work after that.  A
		 * hard reset is then needed to revert to a sane state.
		 *
		 * Let's hope that the FIFO is really stuck and that the
		 * peripheral doesn't wake up now.
		 */
		parport_ip32_frob_control(p, DCR_STROBE, 0);

		/* Fill up FIFO */
		for (residue = priv->fifo_depth; residue > 0; residue--) {
			if (parport_ip32_read_econtrol(p) & ECR_F_FULL)
				break;
			writeb(0x00, priv->regs.fifo);
		}
	}
	if (residue)
		pr_debug1(PPIP32 "%s: %d PWord%s left in FIFO\n",
			  p->name, residue,
			  (residue == 1) ? " was" : "s were");

	/* Now reset the FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);

	/* Host recovery for ECP mode */
	if (mode == ECR_MODE_ECP) {
		parport_ip32_data_reverse(p);
		parport_ip32_frob_control(p, DCR_nINIT, 0);
		if (parport_wait_peripheral(p, DSR_PERROR, 0))
			pr_debug1(PPIP32 "%s: PError timeout 1 in %s\n",
				  p->name, __func__);
		parport_ip32_frob_control(p, DCR_STROBE, DCR_STROBE);
		parport_ip32_frob_control(p, DCR_nINIT, DCR_nINIT);
		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR))
			pr_debug1(PPIP32 "%s: PError timeout 2 in %s\n",
				  p->name, __func__);
	}

	/* Adjust residue if needed */
	parport_ip32_set_mode(p, ECR_MODE_CFG);
	cnfga = readb(priv->regs.cnfgA);
	if (!(cnfga & CNFGA_nBYTEINTRANS)) {
		pr_debug1(PPIP32 "%s: cnfgA contains 0x%02x\n",
			  p->name, cnfga);
		pr_debug1(PPIP32 "%s: Accounting for extra byte\n",
			  p->name);
		residue++;
	}

	/* Don't care about partial PWords since we do not support
	 * PWord != 1 byte. */

	/* Back to forward PS2 mode. */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_data_forward(p);

	return residue;
}

/**
 * parport_ip32_compat_write_data - write a block of data in SPP mode
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: ignored
 */
static size_t parport_ip32_compat_write_data(struct parport *p,
					     const void *buf, size_t len,
					     int flags)
{
	static unsigned int ready_before = 1;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	size_t written = 0;

	/* Special case: a timeout of zero means we cannot call schedule().
	 * Also if O_NONBLOCK is set then use the default implementation. */
	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
		return parport_ieee1284_write_compat(p, buf, len, flags);

	/* Reset FIFO, go in forward mode, and disable ackIntEn */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_set_mode(p, ECR_MODE_PPF);
	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

	/* Wait for peripheral to become ready */
	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
				    DSR_nBUSY | DSR_nFAULT)) {
		/* Avoid flooding the logs */
		if (ready_before)
			printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
			       p->name, __func__);
		ready_before = 0;
		goto stop;
	}
	ready_before = 1;

	written = parport_ip32_fifo_write_block(p, buf, len);

	/* Wait for the FIFO to empty.  The timeout is proportional to
	 * fifo_depth. */
	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);

	/* Check for a potential residue */
	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_PPF);

	/* Then, wait for BUSY to get low. */
	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
		       p->name, __func__);

stop:
	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	return written;
}
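
/*
 * Why the residue is subtracted above: parport_ip32_fifo_write_block()
 * counts a byte as written once it has been pushed into the FIFO, but bytes
 * still sitting in the FIFO after the drain timeout never reached the
 * peripheral, so parport_ip32_get_fifo_residue() is used to correct the
 * count.  The same logic applies to the ECP variant below.
 */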

/*
 * FIXME - Insert here parport_ip32_ecp_read_data().
 */

/**
 * parport_ip32_ecp_write_data - write a block of data in ECP mode
 * @p: pointer to &struct parport
 * @buf: buffer of data to write
 * @len: length of buffer @buf
 * @flags: ignored
 */
static size_t parport_ip32_ecp_write_data(struct parport *p,
					  const void *buf, size_t len,
					  int flags)
{
	static unsigned int ready_before = 1;
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport * const physport = p->physport;
	size_t written = 0;

	/* Special case: a timeout of zero means we cannot call schedule().
	 * Also if O_NONBLOCK is set then use the default implementation. */
	if (physport->cad->timeout <= PARPORT_INACTIVITY_O_NONBLOCK)
		return parport_ieee1284_ecp_write_data(p, buf, len, flags);

	/* Negotiate to forward mode if necessary. */
	if (physport->ieee1284.phase != IEEE1284_PH_FWD_IDLE) {
		/* Event 47: Set nInit high. */
		parport_ip32_frob_control(p, DCR_nINIT | DCR_AUTOFD,
					  DCR_nINIT | DCR_AUTOFD);

		/* Event 49: PError goes high. */
		if (parport_wait_peripheral(p, DSR_PERROR, DSR_PERROR)) {
			printk(KERN_DEBUG PPIP32 "%s: PError timeout in %s\n",
			       p->name, __func__);
			physport->ieee1284.phase = IEEE1284_PH_ECP_DIR_UNKNOWN;
			return 0;
		}
	}

	/* Reset FIFO, go in forward mode, and disable ackIntEn */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_set_mode(p, ECR_MODE_ECP);
	physport->ieee1284.phase = IEEE1284_PH_FWD_DATA;

	/* Wait for peripheral to become ready */
	if (parport_wait_peripheral(p, DSR_nBUSY | DSR_nFAULT,
				    DSR_nBUSY | DSR_nFAULT)) {
		/* Avoid flooding the logs */
		if (ready_before)
			printk(KERN_INFO PPIP32 "%s: not ready in %s\n",
			       p->name, __func__);
		ready_before = 0;
		goto stop;
	}
	ready_before = 1;

	written = parport_ip32_fifo_write_block(p, buf, len);

	/* Wait for the FIFO to empty.  The timeout is proportional to
	 * fifo_depth. */
	parport_ip32_drain_fifo(p, physport->cad->timeout * priv->fifo_depth);

	/* Check for a potential residue */
	written -= parport_ip32_get_fifo_residue(p, ECR_MODE_ECP);

	/* Then, wait for BUSY to get low. */
	if (parport_wait_peripheral(p, DSR_nBUSY, DSR_nBUSY))
		printk(KERN_DEBUG PPIP32 "%s: BUSY timeout in %s\n",
		       p->name, __func__);

stop:
	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	physport->ieee1284.phase = IEEE1284_PH_FWD_IDLE;

	return written;
}

/*
 * FIXME - Insert here parport_ip32_ecp_write_addr().
 */

/*--- Default parport operations ---------------------------------------*/

static const struct parport_operations parport_ip32_ops __initconst = {
	.write_data = parport_ip32_write_data,
	.read_data = parport_ip32_read_data,
	.write_control = parport_ip32_write_control,
	.read_control = parport_ip32_read_control,
	.frob_control = parport_ip32_frob_control,
	.read_status = parport_ip32_read_status,
	.enable_irq = parport_ip32_enable_irq,
	.disable_irq = parport_ip32_disable_irq,
	.data_forward = parport_ip32_data_forward,
	.data_reverse = parport_ip32_data_reverse,
	.init_state = parport_ip32_init_state,
	.save_state = parport_ip32_save_state,
	.restore_state = parport_ip32_restore_state,
	.epp_write_data = parport_ieee1284_epp_write_data,
	.epp_read_data = parport_ieee1284_epp_read_data,
	.epp_write_addr = parport_ieee1284_epp_write_addr,
	.epp_read_addr = parport_ieee1284_epp_read_addr,
	.ecp_write_data = parport_ieee1284_ecp_write_data,
	.ecp_read_data = parport_ieee1284_ecp_read_data,
	.ecp_write_addr = parport_ieee1284_ecp_write_addr,
	.compat_write_data = parport_ieee1284_write_compat,
	.nibble_read_data = parport_ieee1284_read_nibble,
	.byte_read_data = parport_ieee1284_read_byte,
	.owner = THIS_MODULE,
};

/*--- Device detection -------------------------------------------------*/

/**
 * parport_ip32_ecp_supported - check for an ECP port
 * @p: pointer to the &parport structure
 *
 * Returns 1 if an ECP port is found, and 0 otherwise.  This function
 * actually checks if an Extended Control Register seems to be present.  On
 * successful return, the port is placed in SPP mode.
 */
static __init unsigned int parport_ip32_ecp_supported(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int ecr;

	ecr = ECR_MODE_PS2 | ECR_nERRINTR | ECR_SERVINTR;
	writeb(ecr, priv->regs.ecr);
	if (readb(priv->regs.ecr) != (ecr | ECR_F_EMPTY))
		goto fail;

	pr_probe(p, "Found working ECR register\n");
	parport_ip32_set_mode(p, ECR_MODE_SPP);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	return 1;

fail:
	pr_probe(p, "ECR register not found\n");
	return 0;
}

/**
 * parport_ip32_fifo_supported - check for FIFO parameters
 * @p: pointer to the &parport structure
 *
 * Check for FIFO parameters of an Extended Capabilities Port.  Returns 1 on
 * success, and 0 otherwise.  Adjusts FIFO parameters in the parport
 * structure.  On return, the port is placed in SPP mode.
 */
static __init unsigned int parport_ip32_fifo_supported(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	unsigned int configa, configb;
	unsigned int pword;
	unsigned int i;

	/* Configuration mode */
	parport_ip32_set_mode(p, ECR_MODE_CFG);
	configa = readb(priv->regs.cnfgA);
	configb = readb(priv->regs.cnfgB);

	/* Find out PWord size */
	switch (configa & CNFGA_ID_MASK) {
	case CNFGA_ID_8:
		pword = 1;
		break;
	case CNFGA_ID_16:
		pword = 2;
		break;
	case CNFGA_ID_32:
		pword = 4;
		break;
	default:
		pr_probe(p, "Unknown implementation ID: 0x%x\n",
			 (configa & CNFGA_ID_MASK) >> CNFGA_ID_SHIFT);
		goto fail;
	}
	if (pword != 1) {
		pr_probe(p, "Unsupported PWord size: %u\n", pword);
		goto fail;
	}
	priv->pword = pword;
	pr_probe(p, "PWord is %u bits\n", 8 * priv->pword);

	/* Check for compression support */
	writeb(configb | CNFGB_COMPRESS, priv->regs.cnfgB);
	if (readb(priv->regs.cnfgB) & CNFGB_COMPRESS)
		pr_probe(p, "Hardware compression detected (unsupported)\n");
	writeb(configb & ~CNFGB_COMPRESS, priv->regs.cnfgB);

	/* Reset FIFO and go in test mode (no interrupt, no DMA) */
	parport_ip32_set_mode(p, ECR_MODE_TST);

	/* FIFO must be empty now */
	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
		pr_probe(p, "FIFO not reset\n");
		goto fail;
	}

	/* Find out FIFO depth. */
	priv->fifo_depth = 0;
	for (i = 0; i < 1024; i++) {
		if (readb(priv->regs.ecr) & ECR_F_FULL) {
			/* FIFO full */
			priv->fifo_depth = i;
			break;
		}
		writeb((u8)i, priv->regs.fifo);
	}
	if (i >= 1024) {
		pr_probe(p, "Can't fill FIFO\n");
		goto fail;
	}
	if (!priv->fifo_depth) {
		pr_probe(p, "Can't get FIFO depth\n");
		goto fail;
	}
	pr_probe(p, "FIFO is %u PWords deep\n", priv->fifo_depth);

	/* Enable interrupts */
	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);

	/* Find out writeIntrThreshold: number of PWords we know we can write
	 * if we get an interrupt. */
	priv->writeIntrThreshold = 0;
	for (i = 0; i < priv->fifo_depth; i++) {
		if (readb(priv->regs.fifo) != (u8)i) {
			pr_probe(p, "Invalid data in FIFO\n");
			goto fail;
		}
		if (!priv->writeIntrThreshold
		    && readb(priv->regs.ecr) & ECR_SERVINTR)
			/* writeIntrThreshold reached */
			priv->writeIntrThreshold = i + 1;
		if (i + 1 < priv->fifo_depth
		    && readb(priv->regs.ecr) & ECR_F_EMPTY) {
			/* FIFO empty before the last byte? */
			pr_probe(p, "Data lost in FIFO\n");
			goto fail;
		}
	}
	if (!priv->writeIntrThreshold) {
		pr_probe(p, "Can't get writeIntrThreshold\n");
		goto fail;
	}
	pr_probe(p, "writeIntrThreshold is %u\n", priv->writeIntrThreshold);

	/* FIFO must be empty now */
	if (!(readb(priv->regs.ecr) & ECR_F_EMPTY)) {
		pr_probe(p, "Can't empty FIFO\n");
		goto fail;
	}

	/* Reset FIFO */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	/* Set reverse direction (must be in PS2 mode) */
	parport_ip32_data_reverse(p);
	/* Test FIFO, no interrupt, no DMA */
	parport_ip32_set_mode(p, ECR_MODE_TST);
	/* Enable interrupts */
	parport_ip32_frob_econtrol(p, ECR_SERVINTR, 0);

	/* Find out readIntrThreshold: number of PWords we can read if we get
	 * an interrupt. */
	priv->readIntrThreshold = 0;
	for (i = 0; i < priv->fifo_depth; i++) {
		writeb(0xaa, priv->regs.fifo);
		if (readb(priv->regs.ecr) & ECR_SERVINTR) {
			/* readIntrThreshold reached */
			priv->readIntrThreshold = i + 1;
			break;
		}
	}
	if (!priv->readIntrThreshold) {
		pr_probe(p, "Can't get readIntrThreshold\n");
		goto fail;
	}
	pr_probe(p, "readIntrThreshold is %u\n", priv->readIntrThreshold);

	/* Reset ECR */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_data_forward(p);
	parport_ip32_set_mode(p, ECR_MODE_SPP);
	return 1;

fail:
	priv->fifo_depth = 0;
	parport_ip32_set_mode(p, ECR_MODE_SPP);
	return 0;
}
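
/*
 * Cross-reference: the fifo_depth and writeIntrThreshold values probed above
 * are exactly what parport_ip32_fwp_wait_polling() and
 * parport_ip32_fwp_wait_interrupt() later return as the number of PWords
 * that may be pushed in one go; readIntrThreshold would play the symmetric
 * role for reverse transfers once ECP reads are implemented (see the FIXME
 * for parport_ip32_ecp_read_data() above).
 */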

/*--- Initialization code ----------------------------------------------*/

/**
 * parport_ip32_make_isa_registers - compute (ISA) register addresses
 * @regs: pointer to &struct parport_ip32_regs to fill
 * @base: base address of standard and EPP registers
 * @base_hi: base address of ECP registers
 * @regshift: how much to shift register offset by
 *
 * Compute register addresses, according to the ISA standard.  The addresses
 * of the standard and EPP registers are computed from address @base.  The
 * addresses of the ECP registers are computed from address @base_hi.
 */
static void __init
parport_ip32_make_isa_registers(struct parport_ip32_regs *regs,
				void __iomem *base, void __iomem *base_hi,
				unsigned int regshift)
{
#define r_base(offset)    ((u8 __iomem *)base + ((offset) << regshift))
#define r_base_hi(offset) ((u8 __iomem *)base_hi + ((offset) << regshift))
	*regs = (struct parport_ip32_regs){
		.data = r_base(0),
		.dsr = r_base(1),
		.dcr = r_base(2),
		.eppAddr = r_base(3),
		.eppData0 = r_base(4),
		.eppData1 = r_base(5),
		.eppData2 = r_base(6),
		.eppData3 = r_base(7),
		.ecpAFifo = r_base(0),
		.fifo = r_base_hi(0),
		.cnfgA = r_base_hi(0),
		.cnfgB = r_base_hi(1),
		.ecr = r_base_hi(2)
	};
#undef r_base_hi
#undef r_base
}
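
/*
 * Worked example: parport_ip32_probe_port() below calls this helper with
 * regshift = 8, so consecutive registers end up 0x100 bytes apart: data at
 * base + 0x000, dsr at base + 0x100, dcr at base + 0x200, the EPP registers
 * from base + 0x300 to base + 0x700, and fifo/cnfgA, cnfgB and ecr at
 * base_hi + 0x000, base_hi + 0x100 and base_hi + 0x200 respectively.
 */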

/**
 * parport_ip32_probe_port - probe and register IP32 built-in parallel port
 *
 * Returns the newly allocated &parport structure.  On error, an error code
 * is encoded in the return value with the ERR_PTR function.
 */
static __init struct parport *parport_ip32_probe_port(void)
{
	struct parport_ip32_regs regs;
	struct parport_ip32_private *priv = NULL;
	struct parport_operations *ops = NULL;
	struct parport *p = NULL;
	int err;

	parport_ip32_make_isa_registers(&regs, &mace->isa.parallel,
					&mace->isa.ecp1284, 8 /* regshift */);

	ops = kmalloc(sizeof(struct parport_operations), GFP_KERNEL);
	priv = kmalloc(sizeof(struct parport_ip32_private), GFP_KERNEL);
	p = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, ops);
	if (ops == NULL || priv == NULL || p == NULL) {
		err = -ENOMEM;
		goto fail;
	}
	p->base = MACE_BASE + offsetof(struct sgi_mace, isa.parallel);
	p->base_hi = MACE_BASE + offsetof(struct sgi_mace, isa.ecp1284);
	p->private_data = priv;

	*ops = parport_ip32_ops;
	*priv = (struct parport_ip32_private){
		.regs = regs,
		.dcr_writable = DCR_DIR | DCR_SELECT | DCR_nINIT |
				DCR_AUTOFD | DCR_STROBE,
		.irq_mode = PARPORT_IP32_IRQ_FWD,
	};
	init_completion(&priv->irq_complete);

	/* Probe port. */
	if (!parport_ip32_ecp_supported(p)) {
		err = -ENODEV;
		goto fail;
	}
	parport_ip32_dump_state(p, "begin init", 0);

	/* We found what looks like a working ECR register.  Simply assume
	 * that all modes are correctly supported.  Enable basic modes. */
	p->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT;
	p->modes |= PARPORT_MODE_TRISTATE;

	if (!parport_ip32_fifo_supported(p)) {
		printk(KERN_WARNING PPIP32
		       "%s: error: FIFO disabled\n", p->name);
		/* Disable hardware modes depending on a working FIFO. */
		features &= ~PARPORT_IP32_ENABLE_SPP;
		features &= ~PARPORT_IP32_ENABLE_ECP;
		/* DMA is not needed if FIFO is not supported.  */
		features &= ~PARPORT_IP32_ENABLE_DMA;
	}

	/* Request IRQ */
	if (features & PARPORT_IP32_ENABLE_IRQ) {
		int irq = MACEISA_PARALLEL_IRQ;
		if (request_irq(irq, parport_ip32_interrupt, 0, p->name, p)) {
			printk(KERN_WARNING PPIP32
			       "%s: error: IRQ disabled\n", p->name);
			/* DMA cannot work without interrupts. */
			features &= ~PARPORT_IP32_ENABLE_DMA;
		} else {
			pr_probe(p, "Interrupt support enabled\n");
			p->irq = irq;
			priv->dcr_writable |= DCR_IRQ;
		}
	}

	/* Allocate DMA resources */
	if (features & PARPORT_IP32_ENABLE_DMA) {
		if (parport_ip32_dma_register())
			printk(KERN_WARNING PPIP32
			       "%s: error: DMA disabled\n", p->name);
		else {
			pr_probe(p, "DMA support enabled\n");
			p->dma = 0; /* arbitrary value != PARPORT_DMA_NONE */
			p->modes |= PARPORT_MODE_DMA;
		}
	}

	if (features & PARPORT_IP32_ENABLE_SPP) {
		/* Enable compatibility FIFO mode */
		p->ops->compat_write_data = parport_ip32_compat_write_data;
		p->modes |= PARPORT_MODE_COMPAT;
		pr_probe(p, "Hardware support for SPP mode enabled\n");
	}
	if (features & PARPORT_IP32_ENABLE_EPP) {
		/* Set up access functions to use EPP hardware. */
		p->ops->epp_read_data = parport_ip32_epp_read_data;
		p->ops->epp_write_data = parport_ip32_epp_write_data;
		p->ops->epp_read_addr = parport_ip32_epp_read_addr;
		p->ops->epp_write_addr = parport_ip32_epp_write_addr;
		p->modes |= PARPORT_MODE_EPP;
		pr_probe(p, "Hardware support for EPP mode enabled\n");
	}
	if (features & PARPORT_IP32_ENABLE_ECP) {
		/* Enable ECP FIFO mode */
		p->ops->ecp_write_data = parport_ip32_ecp_write_data;
		/* FIXME - not implemented */
		/* p->ops->ecp_read_data = parport_ip32_ecp_read_data; */
		/* p->ops->ecp_write_addr = parport_ip32_ecp_write_addr; */
		p->modes |= PARPORT_MODE_ECP;
		pr_probe(p, "Hardware support for ECP mode enabled\n");
	}

	/* Initialize the port with sensible values */
	parport_ip32_set_mode(p, ECR_MODE_PS2);
	parport_ip32_write_control(p, DCR_SELECT | DCR_nINIT);
	parport_ip32_data_forward(p);
	parport_ip32_disable_irq(p);
	parport_ip32_write_data(p, 0x00);
	parport_ip32_dump_state(p, "end init", 0);

	/* Print out what we found */
	printk(KERN_INFO "%s: SGI IP32 at 0x%lx (0x%lx)",
	       p->name, p->base, p->base_hi);
	if (p->irq != PARPORT_IRQ_NONE)
		printk(", irq %d", p->irq);
	printk(" [");
#define printmode(x)	if (p->modes & PARPORT_MODE_##x)		\
		printk("%s%s", f++ ? "," : "", #x)
	{
		unsigned int f = 0;
		printmode(PCSPP);
		printmode(TRISTATE);
		printmode(COMPAT);
		printmode(EPP);
		printmode(ECP);
		printmode(DMA);
	}
#undef printmode
	printk("]\n");

	parport_announce_port(p);
	return p;

fail:
	if (p)
		parport_put_port(p);
	kfree(priv);
	kfree(ops);
	return ERR_PTR(err);
}

/**
 * parport_ip32_unregister_port - unregister a parallel port
 * @p: pointer to the &struct parport
 *
 * Unregisters a parallel port and frees previously allocated resources
 * (memory, IRQ, ...).
 */
static __exit void parport_ip32_unregister_port(struct parport *p)
{
	struct parport_ip32_private * const priv = p->physport->private_data;
	struct parport_operations *ops = p->ops;

	parport_remove_port(p);
	if (p->modes & PARPORT_MODE_DMA)
		parport_ip32_dma_unregister();
	if (p->irq != PARPORT_IRQ_NONE)
		free_irq(p->irq, p);
	parport_put_port(p);
	kfree(priv);
	kfree(ops);
}

/**
 * parport_ip32_init - module initialization function
 */
static int __init parport_ip32_init(void)
{
	pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
	this_port = parport_ip32_probe_port();
	return PTR_ERR_OR_ZERO(this_port);
}

/**
 * parport_ip32_exit - module termination function
 */
static void __exit parport_ip32_exit(void)
{
	parport_ip32_unregister_port(this_port);
}

/*--- Module stuff -----------------------------------------------------*/

MODULE_AUTHOR("Arnaud Giersch <arnaud.giersch@free.fr>");
MODULE_DESCRIPTION("SGI IP32 built-in parallel port driver");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.6");	/* update in parport_ip32_init() too */

module_init(parport_ip32_init);
module_exit(parport_ip32_exit);

module_param(verbose_probing, bool, S_IRUGO);
MODULE_PARM_DESC(verbose_probing, "Log chit-chat during initialization");

module_param(features, uint, S_IRUGO);
MODULE_PARM_DESC(features,
		 "Bit mask of features to enable"
		 ", bit 0: IRQ support"
		 ", bit 1: DMA support"
		 ", bit 2: hardware SPP mode"
		 ", bit 3: hardware EPP mode"
		 ", bit 4: hardware ECP mode");
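
/*
 * Usage note (illustrative): since "features" is an unsigned bit mask, a
 * command line such as
 *
 *	modprobe parport_ip32 features=0x07 verbose_probing=1
 *
 * would, following the bit assignments listed above, enable IRQ and DMA
 * support plus hardware SPP mode while logging the probe details; the exact
 * value chosen here is only an example.
 */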

/*--- Inform (X)Emacs about preferred coding style ---------------------*/

/*
 * Local Variables:
 * mode: c
 * c-file-style: "linux"
 * indent-tabs-mode: t
 * tab-width: 8
 * fill-column: 78
 * ispell-local-dictionary: "american"
 * End:
 */