fsl_rmu.c

/*
 * Freescale MPC85xx/MPC86xx RapidIO RMU support
 *
 * Copyright 2009 Sysgo AG
 * Thomas Moll <thomas.moll@sysgo.com>
 * - fixed maintenance access routines, check for aligned access
 *
 * Copyright 2009 Integrated Device Technology, Inc.
 * Alex Bounine <alexandre.bounine@idt.com>
 * - Added Port-Write message handling
 * - Added Machine Check exception handling
 *
 * Copyright (C) 2007, 2008, 2010, 2011 Freescale Semiconductor, Inc.
 * Zhang Wei <wei.zhang@freescale.com>
 * Lian Minghuan-B31939 <Minghuan.Lian@freescale.com>
 * Liu Gang <Gang.Liu@freescale.com>
 *
 * Copyright 2005 MontaVista Software, Inc.
 * Matt Porter <mporter@kernel.crashing.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>

#include "fsl_rio.h"
#define GET_RMM_HANDLE(mport) \
		(((struct rio_priv *)(mport->priv))->rmm_handle)
/* RapidIO IRQ numbers, read from the OF device tree */
#define IRQ_RIO_PW(m)		(((struct fsl_rio_pw *)(m))->pwirq)
#define IRQ_RIO_BELL(m)		(((struct fsl_rio_dbell *)(m))->bellirq)
#define IRQ_RIO_TX(m)		(((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->txirq)
#define IRQ_RIO_RX(m)		(((struct fsl_rmu *)(GET_RMM_HANDLE(m)))->rxirq)

#define RIO_MIN_TX_RING_SIZE	2
#define RIO_MAX_TX_RING_SIZE	2048
#define RIO_MIN_RX_RING_SIZE	2
#define RIO_MAX_RX_RING_SIZE	2048

#define RIO_IPWMR_SEN		0x00100000
#define RIO_IPWMR_QFIE		0x00000100
#define RIO_IPWMR_EIE		0x00000020
#define RIO_IPWMR_CQ		0x00000002
#define RIO_IPWMR_PWE		0x00000001

#define RIO_IPWSR_QF		0x00100000
#define RIO_IPWSR_TE		0x00000080
#define RIO_IPWSR_QFI		0x00000010
#define RIO_IPWSR_PWD		0x00000008
#define RIO_IPWSR_PWB		0x00000004

#define RIO_EPWISR		0x10010
/* EPWISR Error match value */
#define RIO_EPWISR_PINT1	0x80000000
#define RIO_EPWISR_PINT2	0x40000000
#define RIO_EPWISR_MU		0x00000002
#define RIO_EPWISR_PW		0x00000001

#define IPWSR_CLEAR		0x98
#define OMSR_CLEAR		0x1cb3
#define IMSR_CLEAR		0x491
#define IDSR_CLEAR		0x91
#define ODSR_CLEAR		0x1c00
#define LTLEECSR_ENABLE_ALL	0xFFC000FC
#define RIO_LTLEECSR		0x060c

#define RIO_IM0SR		0x64
#define RIO_IM1SR		0x164
#define RIO_OM0SR		0x4
#define RIO_OM1SR		0x104

#define RIO_DBELL_WIN_SIZE	0x1000

#define RIO_MSG_OMR_MUI		0x00000002
#define RIO_MSG_OSR_TE		0x00000080
#define RIO_MSG_OSR_QOI		0x00000020
#define RIO_MSG_OSR_QFI		0x00000010
#define RIO_MSG_OSR_MUB		0x00000004
#define RIO_MSG_OSR_EOMI	0x00000002
#define RIO_MSG_OSR_QEI		0x00000001

#define RIO_MSG_IMR_MI		0x00000002
#define RIO_MSG_ISR_TE		0x00000080
#define RIO_MSG_ISR_QFI		0x00000010
#define RIO_MSG_ISR_DIQI	0x00000001

#define RIO_MSG_DESC_SIZE	32
#define RIO_MSG_BUFFER_SIZE	4096

#define DOORBELL_DMR_DI		0x00000002
#define DOORBELL_DSR_TE		0x00000080
#define DOORBELL_DSR_QFI	0x00000010
#define DOORBELL_DSR_DIQI	0x00000001
#define DOORBELL_MESSAGE_SIZE	0x08
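
/*
 * The register block layouts below follow the message unit (RMU), doorbell
 * unit and port-write unit register maps this driver programs; only the
 * fields the driver touches are named, the remaining offsets are padding.
 */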
struct rio_msg_regs {
	u32 omr;
	u32 osr;
	u32 pad1;
	u32 odqdpar;
	u32 pad2;
	u32 osar;
	u32 odpr;
	u32 odatr;
	u32 odcr;
	u32 pad3;
	u32 odqepar;
	u32 pad4[13];
	u32 imr;
	u32 isr;
	u32 pad5;
	u32 ifqdpar;
	u32 pad6;
	u32 ifqepar;
};

struct rio_dbell_regs {
	u32 odmr;
	u32 odsr;
	u32 pad1[4];
	u32 oddpr;
	u32 oddatr;
	u32 pad2[3];
	u32 odretcr;
	u32 pad3[12];
	u32 dmr;
	u32 dsr;
	u32 pad4;
	u32 dqdpar;
	u32 pad5;
	u32 dqepar;
};

struct rio_pw_regs {
	u32 pwmr;
	u32 pwsr;
	u32 epwqbar;
	u32 pwqbar;
};
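
/*
 * Outbound message descriptor, as programmed by fsl_add_outb_message():
 * saddr carries the snoop-enabled source buffer address, dport the
 * destination ID and mailbox, dattr the interrupt/priority attributes and
 * dwcnt the transfer size.
 */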
struct rio_tx_desc {
	u32 pad1;
	u32 saddr;
	u32 dport;
	u32 dattr;
	u32 pad2;
	u32 pad3;
	u32 dwcnt;
	u32 pad4;
};
struct rio_msg_tx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_TX_RING_SIZE];
	dma_addr_t phys_buffer[RIO_MAX_TX_RING_SIZE];
	int tx_slot;
	int size;
	void *dev_id;
};

struct rio_msg_rx_ring {
	void *virt;
	dma_addr_t phys;
	void *virt_buffer[RIO_MAX_RX_RING_SIZE];
	int rx_slot;
	int size;
	void *dev_id;
};

struct fsl_rmu {
	struct rio_msg_regs __iomem *msg_regs;
	struct rio_msg_tx_ring msg_tx_ring;
	struct rio_msg_rx_ring msg_rx_ring;
	int txirq;
	int rxirq;
};

struct rio_dbell_msg {
	u16 pad1;
	u16 tid;
	u16 sid;
	u16 info;
};
/**
 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles outbound message interrupts. Executes a registered outbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_tx_handler(int irq, void *dev_instance)
{
	int osr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	osr = in_be32(&rmu->msg_regs->osr);

	if (osr & RIO_MSG_OSR_TE) {
		pr_info("RIO: outbound message transmission error\n");
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_TE);
		goto out;
	}

	if (osr & RIO_MSG_OSR_QOI) {
		pr_info("RIO: outbound message queue overflow\n");
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_QOI);
		goto out;
	}

	if (osr & RIO_MSG_OSR_EOMI) {
		u32 dqp = in_be32(&rmu->msg_regs->odqdpar);
		int slot = (dqp - rmu->msg_tx_ring.phys) >> 5;

		if (port->outb_msg[0].mcback != NULL) {
			port->outb_msg[0].mcback(port, rmu->msg_tx_ring.dev_id,
					-1, slot);
		}
		/* Ack the end-of-message interrupt */
		out_be32(&rmu->msg_regs->osr, RIO_MSG_OSR_EOMI);
	}

out:
	return IRQ_HANDLED;
}
/**
 * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles inbound message interrupts. Executes a registered inbound
 * mailbox event handler and acks the interrupt occurrence.
 */
static irqreturn_t
fsl_rio_rx_handler(int irq, void *dev_instance)
{
	int isr;
	struct rio_mport *port = (struct rio_mport *)dev_instance;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(port);

	isr = in_be32(&rmu->msg_regs->isr);

	if (isr & RIO_MSG_ISR_TE) {
		pr_info("RIO: inbound message reception error\n");
		out_be32((void *)&rmu->msg_regs->isr, RIO_MSG_ISR_TE);
		goto out;
	}

	/* XXX Need to check/dispatch until queue empty */
	if (isr & RIO_MSG_ISR_DIQI) {
		/*
		 * Can receive messages for any mailbox/letter to that
		 * mailbox destination. So, make the callback with an
		 * unknown/invalid mailbox number argument.
		 */
		if (port->inb_msg[0].mcback != NULL)
			port->inb_msg[0].mcback(port, rmu->msg_rx_ring.dev_id,
					-1, -1);

		/* Ack the queueing interrupt */
		out_be32(&rmu->msg_regs->isr, RIO_MSG_ISR_DIQI);
	}

out:
	return IRQ_HANDLED;
}
/**
 * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles doorbell interrupts. Parses a list of registered
 * doorbell event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_dbell_handler(int irq, void *dev_instance)
{
	int dsr;
	struct fsl_rio_dbell *fsl_dbell = (struct fsl_rio_dbell *)dev_instance;
	int i;

	dsr = in_be32(&fsl_dbell->dbell_regs->dsr);

	if (dsr & DOORBELL_DSR_TE) {
		pr_info("RIO: doorbell reception error\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_TE);
		goto out;
	}

	if (dsr & DOORBELL_DSR_QFI) {
		pr_info("RIO: doorbell queue full\n");
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_QFI);
	}

	/* XXX Need to check/dispatch until queue empty */
	if (dsr & DOORBELL_DSR_DIQI) {
		struct rio_dbell_msg *dmsg =
			fsl_dbell->dbell_ring.virt +
			(in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
		struct rio_dbell *dbell;
		int found = 0;

		pr_debug("RIO: processing doorbell,"
			 " sid %2.2x tid %2.2x info %4.4x\n",
			 dmsg->sid, dmsg->tid, dmsg->info);

		for (i = 0; i < MAX_PORT_NUM; i++) {
			if (fsl_dbell->mport[i]) {
				list_for_each_entry(dbell,
					&fsl_dbell->mport[i]->dbells, node) {
					if ((dbell->res->start
						<= dmsg->info)
						&& (dbell->res->end
						>= dmsg->info)) {
						found = 1;
						break;
					}
				}
				if (found && dbell->dinb) {
					dbell->dinb(fsl_dbell->mport[i],
						dbell->dev_id, dmsg->sid,
						dmsg->tid, dmsg->info);
					break;
				}
			}
		}
		if (!found) {
			pr_debug("RIO: spurious doorbell,"
				 " sid %2.2x tid %2.2x info %4.4x\n",
				 dmsg->sid, dmsg->tid, dmsg->info);
		}
		setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
		out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
	}

out:
	return IRQ_HANDLED;
}
void msg_unit_error_handler(void)
{
	/* XXX: Error recovery is not implemented, we just clear errors */
	out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);

	out_be32((u32 *)(rmu_regs_win + RIO_IM0SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_IM1SR), IMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM0SR), OMSR_CLEAR);
	out_be32((u32 *)(rmu_regs_win + RIO_OM1SR), OMSR_CLEAR);

	out_be32(&dbell->dbell_regs->odsr, ODSR_CLEAR);
	out_be32(&dbell->dbell_regs->dsr, IDSR_CLEAR);

	out_be32(&pw->pw_regs->pwsr, IPWSR_CLEAR);
}
/**
 * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
 * @irq: Linux interrupt number
 * @dev_instance: Pointer to interrupt-specific data
 *
 * Handles port write interrupts. Parses a list of registered
 * port write event handlers and executes a matching event handler.
 */
static irqreturn_t
fsl_rio_port_write_handler(int irq, void *dev_instance)
{
	u32 ipwmr, ipwsr;
	struct fsl_rio_pw *pw = (struct fsl_rio_pw *)dev_instance;
	u32 epwisr, tmp;

	epwisr = in_be32(rio_regs_win + RIO_EPWISR);
	if (!(epwisr & RIO_EPWISR_PW))
		goto pw_done;

	ipwmr = in_be32(&pw->pw_regs->pwmr);
	ipwsr = in_be32(&pw->pw_regs->pwsr);

#ifdef DEBUG_PW
	pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr);
	if (ipwsr & RIO_IPWSR_QF)
		pr_debug(" QF");
	if (ipwsr & RIO_IPWSR_TE)
		pr_debug(" TE");
	if (ipwsr & RIO_IPWSR_QFI)
		pr_debug(" QFI");
	if (ipwsr & RIO_IPWSR_PWD)
		pr_debug(" PWD");
	if (ipwsr & RIO_IPWSR_PWB)
		pr_debug(" PWB");
	pr_debug(" )\n");
#endif

	/* Schedule deferred processing if PW was received */
	if (ipwsr & RIO_IPWSR_QFI) {
		/* Save PW message (if there is room in FIFO),
		 * otherwise discard it.
		 */
		if (kfifo_avail(&pw->pw_fifo) >= RIO_PW_MSG_SIZE) {
			pw->port_write_msg.msg_count++;
			kfifo_in(&pw->pw_fifo, pw->port_write_msg.virt,
				 RIO_PW_MSG_SIZE);
		} else {
			pw->port_write_msg.discard_count++;
			pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n",
				 pw->port_write_msg.discard_count);
		}
		/* Clear interrupt and issue Clear Queue command. This allows
		 * another port-write to be received.
		 */
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_QFI);
		out_be32(&pw->pw_regs->pwmr, ipwmr | RIO_IPWMR_CQ);

		schedule_work(&pw->pw_work);
	}

	if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) {
		pw->port_write_msg.err_count++;
		pr_debug("RIO: Port-Write Transaction Err (%d)\n",
			 pw->port_write_msg.err_count);
		/* Clear Transaction Error: port-write controller should be
		 * disabled when clearing this error
		 */
		out_be32(&pw->pw_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_TE);
		out_be32(&pw->pw_regs->pwmr, ipwmr);
	}

	if (ipwsr & RIO_IPWSR_PWD) {
		pw->port_write_msg.discard_count++;
		pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n",
			 pw->port_write_msg.discard_count);
		out_be32(&pw->pw_regs->pwsr, RIO_IPWSR_PWD);
	}

pw_done:
	if (epwisr & RIO_EPWISR_PINT1) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(0);
	}
	if (epwisr & RIO_EPWISR_PINT2) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		fsl_rio_port_error_handler(1);
	}
	if (epwisr & RIO_EPWISR_MU) {
		tmp = in_be32(rio_regs_win + RIO_LTLEDCSR);
		pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
		msg_unit_error_handler();
	}

	return IRQ_HANDLED;
}
static void fsl_pw_dpc(struct work_struct *work)
{
	struct fsl_rio_pw *pw = container_of(work, struct fsl_rio_pw, pw_work);
	union rio_pw_msg msg_buffer;
	int i;

	/*
	 * Process port-write messages
	 */
	while (kfifo_out_spinlocked(&pw->pw_fifo, (unsigned char *)&msg_buffer,
			 RIO_PW_MSG_SIZE, &pw->pw_fifo_lock)) {
#ifdef DEBUG_PW
		{
			u32 i;

			pr_debug("%s : Port-Write Message:", __func__);
			for (i = 0; i < RIO_PW_MSG_SIZE / sizeof(u32); i++) {
				if ((i % 4) == 0)
					pr_debug("\n0x%02x: 0x%08x", i * 4,
						 msg_buffer.raw[i]);
				else
					pr_debug(" 0x%08x", msg_buffer.raw[i]);
			}
			pr_debug("\n");
		}
#endif
		/* Pass the port-write message to RIO core for processing */
		for (i = 0; i < MAX_PORT_NUM; i++) {
			if (pw->mport[i])
				rio_inb_pwrite_handler(pw->mport[i],
						       &msg_buffer);
		}
	}
}
/**
 * fsl_rio_pw_enable - enable/disable port-write message handling
 * @mport: Master port implementing the port write unit
 * @enable: 1=enable; 0=disable port-write message handling
 */
int fsl_rio_pw_enable(struct rio_mport *mport, int enable)
{
	u32 rval;

	rval = in_be32(&pw->pw_regs->pwmr);

	if (enable)
		rval |= RIO_IPWMR_PWE;
	else
		rval &= ~RIO_IPWMR_PWE;

	out_be32(&pw->pw_regs->pwmr, rval);

	return 0;
}
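
/*
 * Usage sketch (illustrative, not part of the original driver): endpoint
 * drivers do not call the port-write routines here directly. They register
 * a callback with the generic RapidIO core, e.g.
 * rio_request_inb_pwrite(rdev, pwcback), and port-write messages queued by
 * fsl_rio_port_write_handler() reach that callback through
 * fsl_pw_dpc() -> rio_inb_pwrite_handler().
 */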
/**
 * fsl_rio_port_write_init - MPC85xx port write interface init
 * @pw: fsl_rio_pw structure for the port write unit
 *
 * Initializes port write unit hardware and DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
int fsl_rio_port_write_init(struct fsl_rio_pw *pw)
{
	int rc = 0;

	/* Following configurations require a disabled port write controller */
	out_be32(&pw->pw_regs->pwmr,
		 in_be32(&pw->pw_regs->pwmr) & ~RIO_IPWMR_PWE);

	/* Initialize port write */
	pw->port_write_msg.virt = dma_alloc_coherent(pw->dev,
		RIO_PW_MSG_SIZE,
		&pw->port_write_msg.phys, GFP_KERNEL);
	if (!pw->port_write_msg.virt) {
		pr_err("RIO: unable to allocate port write queue\n");
		return -ENOMEM;
	}

	pw->port_write_msg.err_count = 0;
	pw->port_write_msg.discard_count = 0;

	/* Point dequeue/enqueue pointers at first entry */
	out_be32(&pw->pw_regs->epwqbar, 0);
	out_be32(&pw->pw_regs->pwqbar, (u32) pw->port_write_msg.phys);

	pr_debug("EIPWQBAR: 0x%08x IPWQBAR: 0x%08x\n",
		 in_be32(&pw->pw_regs->epwqbar),
		 in_be32(&pw->pw_regs->pwqbar));

	/* Clear interrupt status IPWSR */
	out_be32(&pw->pw_regs->pwsr,
		 (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD));

	/* Configure port write controller for snooping, enable all reporting,
	 * clear queue full
	 */
	out_be32(&pw->pw_regs->pwmr,
		 RIO_IPWMR_SEN | RIO_IPWMR_QFIE | RIO_IPWMR_EIE | RIO_IPWMR_CQ);

	/* Hook up port-write handler */
	rc = request_irq(IRQ_RIO_PW(pw), fsl_rio_port_write_handler,
			 IRQF_SHARED, "port-write", (void *)pw);
	if (rc < 0) {
		pr_err("MPC85xx RIO: unable to request port-write irq");
		goto err_out;
	}
	/* Enable Error Interrupt */
	out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);

	INIT_WORK(&pw->pw_work, fsl_pw_dpc);
	spin_lock_init(&pw->pw_fifo_lock);
	if (kfifo_alloc(&pw->pw_fifo, RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
		pr_err("FIFO allocation failed\n");
		rc = -ENOMEM;
		goto err_out_irq;
	}

	pr_debug("IPWMR: 0x%08x IPWSR: 0x%08x\n",
		 in_be32(&pw->pw_regs->pwmr),
		 in_be32(&pw->pw_regs->pwsr));

	return rc;

err_out_irq:
	free_irq(IRQ_RIO_PW(pw), (void *)pw);
err_out:
	dma_free_coherent(pw->dev, RIO_PW_MSG_SIZE,
			  pw->port_write_msg.virt,
			  pw->port_write_msg.phys);
	return rc;
}
/**
 * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
 * @mport: RapidIO master port info
 * @index: ID of RapidIO interface
 * @destid: Destination ID of target device
 * @data: 16-bit info field of RapidIO doorbell message
 *
 * Sends a MPC85xx doorbell message. Returns %0 on success or
 * %-EINVAL on failure.
 */
int fsl_rio_doorbell_send(struct rio_mport *mport,
				int index, u16 destid, u16 data)
{
	pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
		 index, destid, data);

	/* In serial RapidIO silicon, such as the MPC8548 and MPC8641,
	 * the following sequence of operations is required.
	 */
	out_be32(&dbell->dbell_regs->odmr, 0x00000000);
	out_be32(&dbell->dbell_regs->odretcr, 0x00000004);
	out_be32(&dbell->dbell_regs->oddpr, destid << 16);
	out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data);
	out_be32(&dbell->dbell_regs->odmr, 0x00000001);

	return 0;
}
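
/*
 * Usage sketch (illustrative, not part of the original driver): client
 * drivers normally reach this routine through the generic RapidIO layer,
 * e.g. rio_send_doorbell(rdev, info), which dispatches to the doorbell
 * send hook installed during fsl_rio_setup() rather than calling this
 * function directly.
 */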
/**
 * fsl_add_outb_message - Add message to the MPC85xx outbound message queue
 * @mport: Master port with outbound message queue
 * @rdev: Target of outbound message
 * @mbox: Outbound mailbox
 * @buffer: Message to add to outbound queue
 * @len: Length of message
 *
 * Adds the @buffer message to the MPC85xx outbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int
fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
			void *buffer, size_t len)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 omr;
	struct rio_tx_desc *desc = (struct rio_tx_desc *)rmu->msg_tx_ring.virt
					+ rmu->msg_tx_ring.tx_slot;
	int ret = 0;

	pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer "
		 "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);

	if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
		ret = -EINVAL;
		goto out;
	}

	/* Copy and clear rest of buffer */
	memcpy(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot], buffer,
			len);
	if (len < (RIO_MAX_MSG_SIZE - 4))
		memset(rmu->msg_tx_ring.virt_buffer[rmu->msg_tx_ring.tx_slot]
				+ len, 0, RIO_MAX_MSG_SIZE - len);

	/* Set mbox field for message, and set destid */
	desc->dport = (rdev->destid << 16) | (mbox & 0x3);

	/* Enable EOMI interrupt and priority */
	desc->dattr = 0x28000000 | ((mport->index) << 20);

	/* Set transfer size aligned to next power of 2 (in double words) */
	desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);

	/* Set snooping and source buffer address */
	desc->saddr = 0x00000004
		| rmu->msg_tx_ring.phys_buffer[rmu->msg_tx_ring.tx_slot];

	/* Increment enqueue pointer */
	omr = in_be32(&rmu->msg_regs->omr);
	out_be32(&rmu->msg_regs->omr, omr | RIO_MSG_OMR_MUI);

	/* Go to next descriptor */
	if (++rmu->msg_tx_ring.tx_slot == rmu->msg_tx_ring.size)
		rmu->msg_tx_ring.tx_slot = 0;

out:
	return ret;
}
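
/*
 * Usage sketch (illustrative, not part of the original driver): a mailbox
 * client opens the outbound queue and enqueues through the generic RapidIO
 * core, which routes to the fsl_* routines in this file, roughly:
 *
 *	rio_request_outb_mbox(mport, dev_id, 0, 32, tx_done_cb);
 *	rio_add_outb_message(mport, rdev, 0, buf, len);
 *
 * tx_done_cb() is then invoked from fsl_rio_tx_handler() with the slot of
 * the completed descriptor.
 */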
/**
 * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the outbound mailbox ring
 *
 * Initializes buffer ring, requests the outbound message interrupt,
 * and enables the outbound message unit. Returns %0 on success and
 * %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, j, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_TX_RING_SIZE) ||
	    (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize shadow copy ring */
	rmu->msg_tx_ring.dev_id = dev_id;
	rmu->msg_tx_ring.size = entries;

	for (i = 0; i < rmu->msg_tx_ring.size; i++) {
		rmu->msg_tx_ring.virt_buffer[i] =
			dma_alloc_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				&rmu->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
		if (!rmu->msg_tx_ring.virt_buffer[i]) {
			rc = -ENOMEM;
			for (j = 0; j < rmu->msg_tx_ring.size; j++)
				if (rmu->msg_tx_ring.virt_buffer[j])
					dma_free_coherent(priv->dev,
						RIO_MSG_BUFFER_SIZE,
						rmu->msg_tx_ring.virt_buffer[j],
						rmu->msg_tx_ring.phys_buffer[j]);
			goto out;
		}
	}

	/* Initialize outbound message descriptor ring */
	rmu->msg_tx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
				&rmu->msg_tx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_tx_ring.virt) {
		rc = -ENOMEM;
		goto out_dma;
	}
	memset(rmu->msg_tx_ring.virt, 0,
			rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
	rmu->msg_tx_ring.tx_slot = 0;

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->odqdpar, rmu->msg_tx_ring.phys);
	out_be32(&rmu->msg_regs->odqepar, rmu->msg_tx_ring.phys);

	/* Configure for snooping */
	out_be32(&rmu->msg_regs->osar, 0x00000004);

	/* Clear interrupt status */
	out_be32(&rmu->msg_regs->osr, 0x000000b3);

	/* Hook up outbound message handler */
	rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
			 "msg_tx", (void *)mport);
	if (rc < 0)
		goto out_irq;

	/*
	 * Configure outbound message unit
	 *      Snooping
	 *      Interrupts (all enabled, except QEIE)
	 *      Chaining mode
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->omr, 0x00100220);

	/* Set number of entries */
	out_be32(&rmu->msg_regs->omr,
		 in_be32(&rmu->msg_regs->omr) |
		 ((get_bitmask_order(entries) - 2) << 12));

	/* Now enable the unit */
	out_be32(&rmu->msg_regs->omr, in_be32(&rmu->msg_regs->omr) | 0x1);

out:
	return rc;

out_irq:
	dma_free_coherent(priv->dev,
			  rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

out_dma:
	for (i = 0; i < rmu->msg_tx_ring.size; i++)
		dma_free_coherent(priv->dev, RIO_MSG_BUFFER_SIZE,
				  rmu->msg_tx_ring.virt_buffer[i],
				  rmu->msg_tx_ring.phys_buffer[i]);

	return rc;
}
/**
 * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox
 * @mport: Master port implementing the outbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the outbound message unit, frees all buffers, and
 * frees the outbound message interrupt.
 */
void fsl_close_outb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable outbound message unit */
	out_be32(&rmu->msg_regs->omr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev,
			  rmu->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
			  rmu->msg_tx_ring.virt, rmu->msg_tx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_TX(mport), (void *)mport);
}
/**
 * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @dev_id: Device specific pointer to pass on event
 * @mbox: Mailbox to open
 * @entries: Number of entries in the inbound mailbox ring
 *
 * Initializes buffer ring, requests the inbound message interrupt,
 * and enables the inbound message unit. Returns %0 on success
 * and %-EINVAL or %-ENOMEM on failure.
 */
int
fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
{
	int i, rc = 0;
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	if ((entries < RIO_MIN_RX_RING_SIZE) ||
	    (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
		rc = -EINVAL;
		goto out;
	}

	/* Initialize client buffer ring */
	rmu->msg_rx_ring.dev_id = dev_id;
	rmu->msg_rx_ring.size = entries;
	rmu->msg_rx_ring.rx_slot = 0;
	for (i = 0; i < rmu->msg_rx_ring.size; i++)
		rmu->msg_rx_ring.virt_buffer[i] = NULL;

	/* Initialize inbound message ring */
	rmu->msg_rx_ring.virt = dma_alloc_coherent(priv->dev,
				rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
				&rmu->msg_rx_ring.phys, GFP_KERNEL);
	if (!rmu->msg_rx_ring.virt) {
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&rmu->msg_regs->ifqdpar, (u32) rmu->msg_rx_ring.phys);
	out_be32(&rmu->msg_regs->ifqepar, (u32) rmu->msg_rx_ring.phys);

	/* Clear interrupt status */
	out_be32(&rmu->msg_regs->isr, 0x00000091);

	/* Hook up inbound message handler */
	rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
			 "msg_rx", (void *)mport);
	if (rc < 0) {
		dma_free_coherent(priv->dev,
			rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);
		goto out;
	}

	/*
	 * Configure inbound message unit:
	 *      Snooping
	 *      4KB max message size
	 *      Unmask all interrupt sources
	 *      Disable
	 */
	out_be32(&rmu->msg_regs->imr, 0x001b0060);

	/* Set number of queue entries */
	setbits32(&rmu->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);

	/* Now enable the unit */
	setbits32(&rmu->msg_regs->imr, 0x1);

out:
	return rc;
}
/**
 * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox
 * @mport: Master port implementing the inbound message unit
 * @mbox: Mailbox to close
 *
 * Disables the inbound message unit, frees all buffers, and
 * frees the inbound message interrupt.
 */
void fsl_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct rio_priv *priv = mport->priv;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	/* Disable inbound message unit */
	out_be32(&rmu->msg_regs->imr, 0);

	/* Free ring */
	dma_free_coherent(priv->dev, rmu->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
			  rmu->msg_rx_ring.virt, rmu->msg_rx_ring.phys);

	/* Free interrupt */
	free_irq(IRQ_RIO_RX(mport), (void *)mport);
}
/**
 * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Adds the @buf buffer to the MPC85xx inbound message queue. Returns
 * %0 on success or %-EINVAL on failure.
 */
int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
{
	int rc = 0;
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);

	pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
		 rmu->msg_rx_ring.rx_slot);

	if (rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot]) {
		printk(KERN_ERR
			"RIO: error adding inbound buffer %d, buffer exists\n",
			rmu->msg_rx_ring.rx_slot);
		rc = -EINVAL;
		goto out;
	}

	rmu->msg_rx_ring.virt_buffer[rmu->msg_rx_ring.rx_slot] = buf;
	if (++rmu->msg_rx_ring.rx_slot == rmu->msg_rx_ring.size)
		rmu->msg_rx_ring.rx_slot = 0;

out:
	return rc;
}
/**
 * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit
 * @mport: Master port implementing the inbound message unit
 * @mbox: Inbound mailbox number
 *
 * Gets the next available inbound message from the inbound message queue.
 * A pointer to the message is returned on success or NULL on failure.
 */
void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
	u32 phys_buf;
	void *virt_buf;
	void *buf = NULL;
	int buf_idx;

	phys_buf = in_be32(&rmu->msg_regs->ifqdpar);

	/* If no more messages, then bail out */
	if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
		goto out2;

	virt_buf = rmu->msg_rx_ring.virt + (phys_buf
					- rmu->msg_rx_ring.phys);
	buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
	buf = rmu->msg_rx_ring.virt_buffer[buf_idx];

	if (!buf) {
		printk(KERN_ERR
			"RIO: inbound message copy failed, no buffers\n");
		goto out1;
	}

	/* Copy max message size, caller is expected to allocate that big */
	memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);

	/* Clear the available buffer */
	rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;

out1:
	setbits32(&rmu->msg_regs->imr, RIO_MSG_IMR_MI);

out2:
	return buf;
}
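
/*
 * Usage sketch (illustrative, not part of the original driver): the inbound
 * path mirrors the outbound one. A client opens the mailbox and posts
 * receive buffers via the generic RapidIO core, then drains messages from
 * its inbound callback, roughly:
 *
 *	rio_request_inb_mbox(mport, dev_id, 0, 32, rx_cb);
 *	rio_add_inb_buffer(mport, 0, buf);
 *	...
 *	while ((msg = rio_get_inb_message(mport, 0)) != NULL)
 *		process(msg);
 *
 * rio_get_inb_message() resolves to fsl_get_inb_message() above.
 */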
/**
 * fsl_rio_doorbell_init - MPC85xx doorbell interface init
 * @dbell: fsl_rio_dbell structure for the inbound doorbell unit
 *
 * Initializes doorbell unit hardware and inbound DMA buffer
 * ring. Called from fsl_rio_setup(). Returns %0 on success
 * or %-ENOMEM on failure.
 */
int fsl_rio_doorbell_init(struct fsl_rio_dbell *dbell)
{
	int rc = 0;

	/* Initialize inbound doorbells */
	dbell->dbell_ring.virt = dma_alloc_coherent(dbell->dev, 512 *
		DOORBELL_MESSAGE_SIZE, &dbell->dbell_ring.phys, GFP_KERNEL);
	if (!dbell->dbell_ring.virt) {
		printk(KERN_ERR "RIO: unable to allocate inbound doorbell ring\n");
		rc = -ENOMEM;
		goto out;
	}

	/* Point dequeue/enqueue pointers at first entry in ring */
	out_be32(&dbell->dbell_regs->dqdpar, (u32) dbell->dbell_ring.phys);
	out_be32(&dbell->dbell_regs->dqepar, (u32) dbell->dbell_ring.phys);

	/* Clear interrupt status */
	out_be32(&dbell->dbell_regs->dsr, 0x00000091);

	/* Hook up doorbell handler */
	rc = request_irq(IRQ_RIO_BELL(dbell), fsl_rio_dbell_handler, 0,
			 "dbell_rx", (void *)dbell);
	if (rc < 0) {
		dma_free_coherent(dbell->dev, 512 * DOORBELL_MESSAGE_SIZE,
			 dbell->dbell_ring.virt, dbell->dbell_ring.phys);
		printk(KERN_ERR
			"MPC85xx RIO: unable to request inbound doorbell irq");
		goto out;
	}

	/* Configure doorbells for snooping, 512 entries, and enable */
	out_be32(&dbell->dbell_regs->dmr, 0x00108161);

out:
	return rc;
}
int fsl_rio_setup_rmu(struct rio_mport *mport, struct device_node *node)
{
	struct rio_priv *priv;
	struct fsl_rmu *rmu;
	u64 msg_start;
	const u32 *msg_addr;
	int mlen;
	int aw;

	if (!mport || !mport->priv)
		return -EINVAL;

	priv = mport->priv;

	if (!node) {
		dev_warn(priv->dev, "Can't get %s property 'fsl,rmu'\n",
			priv->dev->of_node->full_name);
		return -EINVAL;
	}

	rmu = kzalloc(sizeof(struct fsl_rmu), GFP_KERNEL);
	if (!rmu)
		return -ENOMEM;

	aw = of_n_addr_cells(node);
	msg_addr = of_get_property(node, "reg", &mlen);
	if (!msg_addr) {
		pr_err("%s: unable to find 'reg' property of message-unit\n",
			node->full_name);
		kfree(rmu);
		return -ENOMEM;
	}
	msg_start = of_read_number(msg_addr, aw);

	rmu->msg_regs = (struct rio_msg_regs *)
			(rmu_regs_win + (u32)msg_start);

	rmu->txirq = irq_of_parse_and_map(node, 0);
	rmu->rxirq = irq_of_parse_and_map(node, 1);
	printk(KERN_INFO "%s: txirq: %d, rxirq %d\n",
		node->full_name, rmu->txirq, rmu->rxirq);

	priv->rmm_handle = rmu;

	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);

	return 0;
}