/*
 * wcxb SPI library
 *
 * Copyright (C) 2013 Digium, Inc.
 *
 * All rights reserved.
 *
 */

/*
 * See http://www.asterisk.org for more information about
 * the Asterisk project. Please do not directly contact
 * any of the maintainers of this project for assistance;
 * the project provides a web site, mailing lists and IRC
 * channels for your use.
 *
 * This program is free software, distributed under the terms of
 * the GNU General Public License Version 2 as published by the
 * Free Software Foundation. See the LICENSE file included with
 * this program for more details.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/version.h>
#include <linux/slab.h>

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 26)
#define HAVE_RATELIMIT
#include <linux/ratelimit.h>
#endif

#include <dahdi/kernel.h>

#include <stdbool.h>

#include "wcxb.h"
#include "wcxb_spi.h"
#include "wcxb_flash.h"

/* The definition for Surprise Down was added in Linux 3.6 in (a0dee2e PCI: misc
 * pci_reg additions). It may be backported though so we won't check for the
 * version. Same with PCI_EXP_SLTCTL_PDCE. */
#ifndef PCI_ERR_UNC_SURPDN
#define PCI_ERR_UNC_SURPDN 0x20
#endif
#ifndef PCI_EXP_SLTCTL_PDCE
#define PCI_EXP_SLTCTL_PDCE 0x8
#endif

/* FPGA Status definitions */
#define OCT_CPU_RESET		(1 << 0)
#define OCT_CPU_DRAM_CKE	(1 << 1)
#define STATUS_LED_GREEN	(1 << 9)
#define STATUS_LED_RED		(1 << 10)
#define FALC_CPU_RESET		(1 << 11)

/* Descriptor ring definitions */
#define DRING_SIZE	(1 << 7)	/* Must be a power of 2 */
#define DRING_SIZE_MASK	(DRING_SIZE-1)
#define DESC_EOR	(1 << 0)
#define DESC_INT	(1 << 1)
#define DESC_OWN	(1 << 31)
#define DESC_DEFAULT_STATUS	0xdeadbe00
#define DMA_CHAN_SIZE	128
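
/*
 * Ring protocol, as implemented by _wcxb_reset_dring() and wcxb_handle_dma()
 * below: the driver hands a descriptor to the FPGA by setting DESC_OWN in its
 * control word, and the FPGA clears that bit when the transfer for the frame
 * has completed. The last active descriptor carries DESC_EOR so the engine
 * wraps back to the start of the ring.
 */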

/* Echocan definitions */
#define OCT_OFFSET		(xb->membase + 0x10000)
#define OCT_CONTROL_REG		(OCT_OFFSET + 0)
#define OCT_DATA_REG		(OCT_OFFSET + 0x4)
#define OCT_ADDRESS_HIGH_REG	(OCT_OFFSET + 0x8)
#define OCT_ADDRESS_LOW_REG	(OCT_OFFSET + 0xa)
#define OCT_DIRECT_WRITE_MASK	0x3001
#define OCT_INDIRECT_READ_MASK	0x0101
#define OCT_INDIRECT_WRITE_MASK	0x3101
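
/*
 * Indirect access to the Octasic part is a three-step sequence: write the
 * high and low halves of the target address to OCT_ADDRESS_HIGH_REG and
 * OCT_ADDRESS_LOW_REG, start the transaction by writing one of the masks
 * above to OCT_CONTROL_REG, then poll bit 8 of OCT_CONTROL_REG until the
 * hardware clears it. wcxb_get_echocan_reg() and wcxb_set_echocan_reg()
 * below implement this sequence.
 */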

/* DMA definitions */
#define TDM_DRING_ADDR		0x2000
#define TDM_CONTROL		(TDM_DRING_ADDR + 0x4)
#define ENABLE_ECHOCAN_TDM	(1 << 0)
#define TDM_RECOVER_CLOCK	(1 << 1)
#define ENABLE_DMA		(1 << 2)
#define DMA_RUNNING		(1 << 3)
#define DMA_LOOPBACK		(1 << 4)
#define AUTHENTICATED		(1 << 5)
#define TDM_VERSION		(TDM_DRING_ADDR + 0x24)

/* Interrupt definitions */
#define INTERRUPT_CONTROL	0x300
#define ISR	(INTERRUPT_CONTROL + 0x0)
#define IPR	(INTERRUPT_CONTROL + 0x4)
#define IER	(INTERRUPT_CONTROL + 0x8)
#define IAR	(INTERRUPT_CONTROL + 0xc)
#define SIE	(INTERRUPT_CONTROL + 0x10)
#define CIE	(INTERRUPT_CONTROL + 0x14)
#define IVR	(INTERRUPT_CONTROL + 0x18)
#define MER	(INTERRUPT_CONTROL + 0x1c)
#define MER_ME		(1 << 0)
#define MER_HIE		(1 << 1)
#define DESC_UNDERRUN	(1 << 0)
#define DESC_COMPLETE	(1 << 1)
#define OCT_INT		(1 << 2)
#define FALC_INT	(1 << 3)
#define SPI_INT		(1 << 4)

#define FLASH_SPI_BASE	0x200

struct wcxb_hw_desc {
	volatile __be32 status;
	__be32 tx_buf;
	__be32 rx_buf;
	volatile __be32 control;
} __packed;

struct wcxb_meta_desc {
	void *tx_buf_virt;
	void *rx_buf_virt;
};
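
/*
 * The hardware descriptors above hold only DMA bus addresses; the parallel
 * wcxb_meta_desc array remembers the kernel virtual addresses of the same
 * buffers so the driver can access the data and later return the buffers to
 * the DMA pool.
 */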

static inline bool wcxb_is_pcie(const struct wcxb *xb)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 33)
	return pci_is_pcie(xb->pdev);
#else
#ifndef WCXB_PCI_DEV_DOES_NOT_HAVE_IS_PCIE
	return (xb->pdev->is_pcie > 0);
#else
	return (xb->flags.is_pcie > 0);
#endif
#endif
}

static const unsigned int CLK_SRC_MASK = ((1 << 13) | (1 << 12) | (1 << 1));

enum wcxb_clock_sources wcxb_get_clksrc(struct wcxb *xb)
{
	static const u32 SELF = 0x0;
	static const u32 RECOVER = (1 << 1);
	static const u32 SLAVE = (1 << 12) | (1 << 1);
	unsigned long flags;
	u32 reg;

	spin_lock_irqsave(&xb->lock, flags);
	reg = ioread32be(xb->membase + TDM_CONTROL) & CLK_SRC_MASK;
	spin_unlock_irqrestore(&xb->lock, flags);

	if (SELF == reg)
		return WCXB_CLOCK_SELF;
	else if (RECOVER == reg)
		return WCXB_CLOCK_RECOVER;
	else if (SLAVE == reg)
		return WCXB_CLOCK_SLAVE;
	else
		WARN_ON(1);
	return WCXB_CLOCK_SELF;
}

void wcxb_set_clksrc(struct wcxb *xb, enum wcxb_clock_sources clksrc)
{
	unsigned long flags;
	u32 clkbits = 0;

	switch (clksrc) {
	case WCXB_CLOCK_RECOVER:
		if (xb->flags.drive_timing_cable)
			clkbits = (1 << 13) | (1 << 1);
		else
			clkbits = (1 << 1);
		break;
	case WCXB_CLOCK_SELF:
		if (xb->flags.drive_timing_cable)
			clkbits = (1 << 13);
		else
			clkbits = 0;
		break;
	case WCXB_CLOCK_SLAVE:
		/* When we're slave, do not ever drive the timing cable. */
		clkbits = (1 << 12) | (1 << 1);
		break;
	}

	/* set new clock select */
	spin_lock_irqsave(&xb->lock, flags);
	if (!wcxb_is_stopped(xb)) {
		dev_err(&xb->pdev->dev,
			"ERROR: Cannot set clock source while DMA engine is running.\n");
	} else {
		u32 reg;
		reg = ioread32be(xb->membase + TDM_CONTROL);
		reg &= ~CLK_SRC_MASK;
		reg |= (clkbits & CLK_SRC_MASK);
		iowrite32be(reg, xb->membase + TDM_CONTROL);
	}
	spin_unlock_irqrestore(&xb->lock, flags);
}
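
/*
 * Minimal caller sketch (hypothetical): wcxb_set_clksrc() refuses to change
 * the selection while the DMA engine is running, so a caller would stop it
 * first, e.g.:
 *
 *	wcxb_stop_dma(xb);
 *	if (!wcxb_wait_for_stop(xb, 50))
 *		wcxb_set_clksrc(xb, WCXB_CLOCK_SELF);
 *	wcxb_start(xb);
 */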

void wcxb_enable_echocan(struct wcxb *xb)
{
	u32 reg;
	unsigned long flags;

	spin_lock_irqsave(&xb->lock, flags);
	reg = ioread32be(xb->membase + TDM_CONTROL);
	reg |= ENABLE_ECHOCAN_TDM;
	iowrite32be(reg, xb->membase + TDM_CONTROL);
	spin_unlock_irqrestore(&xb->lock, flags);
}

void wcxb_disable_echocan(struct wcxb *xb)
{
	u32 reg;
	unsigned long flags;

	spin_lock_irqsave(&xb->lock, flags);
	reg = ioread32be(xb->membase + TDM_CONTROL);
	reg &= ~ENABLE_ECHOCAN_TDM;
	iowrite32be(reg, xb->membase + TDM_CONTROL);
	spin_unlock_irqrestore(&xb->lock, flags);
}

void wcxb_reset_echocan(struct wcxb *xb)
{
	unsigned long flags;
	int reg;

	spin_lock_irqsave(&xb->lock, flags);
	reg = ioread32be(xb->membase);
	iowrite32be((reg & ~OCT_CPU_RESET), xb->membase);
	spin_unlock_irqrestore(&xb->lock, flags);

	msleep_interruptible(1);

	spin_lock_irqsave(&xb->lock, flags);
	reg = ioread32be(xb->membase);
	iowrite32be((reg | OCT_CPU_RESET), xb->membase);
	spin_unlock_irqrestore(&xb->lock, flags);

	dev_dbg(&xb->pdev->dev, "Reset octasic\n");
}

bool wcxb_is_echocan_present(struct wcxb *xb)
{
	return 0x1 == ioread16be(OCT_CONTROL_REG);
}

void wcxb_enable_echocan_dram(struct wcxb *xb)
{
	unsigned long flags;
	int reg;

	spin_lock_irqsave(&xb->lock, flags);
	reg = ioread32be(xb->membase);
	iowrite32be((reg | OCT_CPU_DRAM_CKE), xb->membase);
	spin_unlock_irqrestore(&xb->lock, flags);
}

u16 wcxb_get_echocan_reg(struct wcxb *xb, u32 address)
{
	uint16_t highaddress = ((address >> 20) & 0xfff);
	uint16_t lowaddress = ((address >> 4) & 0xffff);
	unsigned long stop = jiffies + HZ/10;
	unsigned long flags;
	u16 ret;

	spin_lock_irqsave(&xb->lock, flags);
	iowrite16be(highaddress, OCT_ADDRESS_HIGH_REG);
	iowrite16be(lowaddress, OCT_ADDRESS_LOW_REG);
	iowrite16be(OCT_INDIRECT_READ_MASK | ((address & 0xe) << 8),
		    OCT_CONTROL_REG);
	/* No read should take longer than 100ms */
	do {
		ret = ioread16be(OCT_CONTROL_REG);
	} while ((ret & (1 << 8)) && time_before(jiffies, stop));
	WARN_ON_ONCE(time_after_eq(jiffies, stop));
	ret = ioread16be(OCT_DATA_REG);
	spin_unlock_irqrestore(&xb->lock, flags);
	return ret;
}

void wcxb_set_echocan_reg(struct wcxb *xb, u32 address, u16 val)
{
	unsigned long flags;
	uint16_t ret;
	uint16_t highaddress = ((address >> 20) & 0xfff);
	uint16_t lowaddress = ((address >> 4) & 0xffff);
	unsigned long stop = jiffies + HZ/10;

	spin_lock_irqsave(&xb->lock, flags);
	iowrite16be(highaddress, OCT_ADDRESS_HIGH_REG);
	iowrite16be(lowaddress, OCT_ADDRESS_LOW_REG);
	iowrite16be(val, OCT_DATA_REG);
	iowrite16be(OCT_INDIRECT_WRITE_MASK | ((address & 0xe) << 8),
		    OCT_CONTROL_REG);
	/* No write should take longer than 100ms */
	do {
		ret = ioread16be(OCT_CONTROL_REG);
	} while ((ret & (1 << 8)) && time_before(jiffies, stop));
	spin_unlock_irqrestore(&xb->lock, flags);
	WARN_ON_ONCE(time_after_eq(jiffies, stop));
}
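
/*
 * Hypothetical read-modify-write of an Octasic register using the helpers
 * above (the register offset and bit are made up for illustration):
 *
 *	u16 val = wcxb_get_echocan_reg(xb, 0x0004);
 *	wcxb_set_echocan_reg(xb, 0x0004, val | (1 << 0));
 */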

#ifdef HAVE_RATELIMIT
static DEFINE_RATELIMIT_STATE(_underrun_rl, DEFAULT_RATELIMIT_INTERVAL,
			      DEFAULT_RATELIMIT_BURST);
#endif

/* _wcxb_reset_dring needs to be called with xb->lock held. */
static void _wcxb_reset_dring(struct wcxb *xb)
{
	int x;
	struct wcxb_meta_desc *mdesc;
	struct wcxb_hw_desc *hdesc = NULL;

	xb->dma_head = xb->dma_tail = 0;

	if (unlikely(xb->latency > DRING_SIZE)) {
#ifdef HAVE_RATELIMIT
		if (__ratelimit(&_underrun_rl)) {
#else
		if (printk_ratelimit()) {
#endif
			dev_info(&xb->pdev->dev,
				 "Oops! Tried to increase latency past buffer size.\n");
		}
		xb->latency = DRING_SIZE;
	}

	for (x = 0; x < xb->latency; x++) {
		dma_addr_t dma_tmp;

		mdesc = &xb->meta_dring[x];
		hdesc = &xb->hw_dring[x];

		hdesc->status = cpu_to_be32(DESC_DEFAULT_STATUS);
		if (!mdesc->tx_buf_virt) {
			mdesc->tx_buf_virt =
				dma_pool_alloc(xb->pool, GFP_ATOMIC, &dma_tmp);
			hdesc->tx_buf = cpu_to_be32(dma_tmp);
			mdesc->rx_buf_virt =
				dma_pool_alloc(xb->pool, GFP_ATOMIC, &dma_tmp);
			hdesc->rx_buf = cpu_to_be32(dma_tmp);
		}
		hdesc->control = cpu_to_be32(DESC_INT|DESC_OWN);
		BUG_ON(!mdesc->tx_buf_virt || !mdesc->rx_buf_virt);
	}

	BUG_ON(!hdesc);
	/* Set end of ring bit in last descriptor to force hw to loop around */
	hdesc->control |= cpu_to_be32(DESC_EOR);

#ifdef DEBUG
	xb->last_retry_count = 0;
	xb->max_retry_count = 0;
	xb->last_dma_time = 0;
	xb->max_dma_time = 0;
#endif

	iowrite32be(xb->hw_dring_phys, xb->membase + TDM_DRING_ADDR);
}
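
/*
 * wcxb_handle_dma() walks the ring with two cursors: dma_tail chases
 * descriptors the hardware has released (DESC_OWN cleared) and pushes each
 * received frame up through ops->handle_receive(), while dma_head refills
 * the next transmit buffer through ops->handle_transmit() and hands that
 * descriptor back to the hardware by setting DESC_OWN again.
 */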

static void wcxb_handle_dma(struct wcxb *xb)
{
	struct wcxb_meta_desc *mdesc;
	struct wcxb_hw_desc *tail = &(xb->hw_dring[xb->dma_tail]);

	while (!(tail->control & cpu_to_be32(DESC_OWN))) {
		u_char *frame;

#ifdef DEBUG
		xb->last_retry_count =
			((be32_to_cpu(tail->control) & 0x0000ff00) >> 8);
		xb->last_dma_time = (be32_to_cpu(tail->status));
#endif

		mdesc = &xb->meta_dring[xb->dma_tail];
		frame = mdesc->rx_buf_virt;
		xb->ops->handle_receive(xb, frame);

		xb->dma_tail =
			(xb->dma_tail == xb->latency-1) ? 0 : xb->dma_tail + 1;
		tail = &(xb->hw_dring[xb->dma_tail]);

		mdesc = &xb->meta_dring[xb->dma_head];
		frame = mdesc->tx_buf_virt;
		xb->ops->handle_transmit(xb, frame);
		wmb();
		xb->hw_dring[xb->dma_head].control |= cpu_to_be32(DESC_OWN);
		xb->dma_head =
			(xb->dma_head == xb->latency-1) ? 0 : xb->dma_head + 1;
	}

#ifdef DEBUG
	if (xb->last_retry_count > xb->max_retry_count) {
		xb->max_retry_count = xb->last_retry_count;
		dev_info(&xb->pdev->dev,
			 "New DMA max retries detected: %d\n",
			 xb->max_retry_count);
	}
	if (xb->last_dma_time > xb->max_dma_time) {
		xb->max_dma_time = xb->last_dma_time;
		dev_info(&xb->pdev->dev,
			 "New DMA max transfer time detected: %d\n",
			 xb->max_dma_time);
	}
#endif
}

static irqreturn_t _wcxb_isr(int irq, void *dev_id)
{
	struct wcxb *xb = dev_id;
	unsigned int limit = 8;
	u32 pending;

	pending = ioread32be(xb->membase + ISR);
	if (!pending)
		return IRQ_NONE;

	do {
		iowrite32be(pending, xb->membase + IAR);

		if (pending & DESC_UNDERRUN) {
			u32 reg;

			/* Report the error in case drivers have any custom
			 * methods for indicating potential data corruption. An
			 * underrun means data loss in the TDM channel. */
			if (xb->ops->handle_error)
				xb->ops->handle_error(xb);

			spin_lock(&xb->lock);
			if (!xb->flags.latency_locked) {
				/* bump latency */
				xb->latency = min(xb->latency + 1,
						  xb->max_latency);
#ifdef HAVE_RATELIMIT
				if (__ratelimit(&_underrun_rl)) {
#else
				if (printk_ratelimit()) {
#endif
					if (xb->latency != xb->max_latency) {
						dev_info(&xb->pdev->dev,
							 "Underrun detected by hardware. Latency bumped to: %dms\n",
							 xb->latency);
					} else {
						dev_info(&xb->pdev->dev,
							 "Underrun detected by hardware. Latency at max of %dms.\n",
							 xb->latency);
					}
				}
			}

			/* re-setup dma ring */
			_wcxb_reset_dring(xb);

			/* set dma enable bit */
			reg = ioread32be(xb->membase + TDM_CONTROL);
			reg |= ENABLE_DMA;
			iowrite32be(reg, xb->membase + TDM_CONTROL);

			spin_unlock(&xb->lock);
		}

		if (pending & DESC_COMPLETE) {
			xb->framecount++;
			wcxb_handle_dma(xb);
		}

		if (NULL != xb->ops->handle_interrupt)
			xb->ops->handle_interrupt(xb, pending);

		pending = ioread32be(xb->membase + ISR);
	} while (pending && --limit);
	return IRQ_HANDLED;
}

DAHDI_IRQ_HANDLER(wcxb_isr)
{
	irqreturn_t ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = _wcxb_isr(irq, dev_id);
	local_irq_restore(flags);
	return ret;
}

static int wcxb_alloc_dring(struct wcxb *xb, const char *board_name)
{
	xb->meta_dring =
		kzalloc(sizeof(struct wcxb_meta_desc) * DRING_SIZE,
			GFP_KERNEL);
	if (!xb->meta_dring)
		return -ENOMEM;

	xb->hw_dring = dma_alloc_coherent(&xb->pdev->dev,
			sizeof(struct wcxb_hw_desc) * DRING_SIZE,
			&xb->hw_dring_phys,
			GFP_KERNEL);
	if (!xb->hw_dring) {
		kfree(xb->meta_dring);
		return -ENOMEM;
	}

	xb->pool = dma_pool_create(board_name, &xb->pdev->dev,
				   PAGE_SIZE, PAGE_SIZE, 0);
	if (!xb->pool) {
		kfree(xb->meta_dring);
		dma_free_coherent(&xb->pdev->dev,
				  sizeof(struct wcxb_hw_desc) * DRING_SIZE,
				  xb->hw_dring,
				  xb->hw_dring_phys);
		return -ENOMEM;
	}
	return 0;
}

/**
 * wcxb_soft_reset - Set interface registers back to known good values.
 *
 * This represents the normal default state after a reset of the FPGA. This
 * function is preferred over the hard reset function.
 *
 */
static void wcxb_soft_reset(struct wcxb *xb)
{
	/* digium_gpo */
	iowrite32be(0x0, xb->membase);

	/* xps_intc */
	iowrite32be(0x0, xb->membase + 0x300);
	iowrite32be(0x0, xb->membase + 0x308);
	iowrite32be(0x0, xb->membase + 0x310);
	iowrite32be(0x0, xb->membase + 0x31C);

	/* xps_spi_config_flash */
	iowrite32be(0xA, xb->membase + 0x200);

	/* tdm engine */
	iowrite32be(0x0, xb->membase + 0x2000);
	iowrite32be(0x0, xb->membase + 0x2004);
}

static void _wcxb_hard_reset(struct wcxb *xb)
{
	struct pci_dev *const pdev = xb->pdev;
	u32 microblaze_version;
	unsigned long stop_time = jiffies + msecs_to_jiffies(2000);

	pci_save_state(pdev);
	iowrite32be(0xe00, xb->membase + TDM_CONTROL);
	/* This sleep is to give the FPGA time to bring up the PCI/PCIe
	 * interface. */
	msleep(200);
	pci_restore_state(pdev);

	/* Wait for the Microblaze CPU to complete its startup */
	do {
		msleep(20);
		/* Can return either 0xffff or 0 before it's fully booted */
		microblaze_version = ioread32be(xb->membase + 0x2018) ?: 0xffff;
	} while (time_before(jiffies, stop_time)
		 && 0xffff == microblaze_version);
}

/*
 * Since the FPGA hard reset drops the PCIe link we need to disable
 * error reporting on the upstream link. Otherwise Surprise Down errors
 * may be reported in response to the link going away.
 *
 * NOTE: We cannot use pci_disable_pcie_error_reporting() because it will not
 * disable error reporting if the system firmware is attached to the advanced
 * error reporting mechanism.
 */
static void _wcxb_pcie_hard_reset(struct wcxb *xb)
{
	struct pci_dev *const parent = xb->pdev->bus->self;
	u32 aer_mask;
	u16 sltctl;
	int pos_err;
	int pos_exp;

	if (!wcxb_is_pcie(xb))
		return;

	pos_err = pci_find_ext_capability(parent, PCI_EXT_CAP_ID_ERR);
	if (pos_err) {
		pci_read_config_dword(parent, pos_err + PCI_ERR_UNCOR_MASK,
				      &aer_mask);
		pci_write_config_dword(parent, pos_err + PCI_ERR_UNCOR_MASK,
				       aer_mask | PCI_ERR_UNC_SURPDN);
	}

	/* Also disable any presence change reporting. */
	pos_exp = pci_find_capability(parent, PCI_CAP_ID_EXP);
	if (pos_exp) {
		pci_read_config_word(parent, pos_exp + PCI_EXP_SLTCTL,
				     &sltctl);
		pci_write_config_word(parent, pos_exp + PCI_EXP_SLTCTL,
				      sltctl & ~PCI_EXP_SLTCTL_PDCE);
	}

	_wcxb_hard_reset(xb);

	if (pos_exp)
		pci_write_config_word(parent, pos_exp + PCI_EXP_SLTCTL, sltctl);

	if (pos_err) {
		pci_write_config_dword(parent, pos_err + PCI_ERR_UNCOR_MASK,
				       aer_mask);
		/* Clear the error as well from the status register. */
		pci_write_config_dword(parent, pos_err + PCI_ERR_UNCOR_STATUS,
				       PCI_ERR_UNC_SURPDN);
	}
}

/**
 * wcxb_hard_reset - Reset FPGA and reload firmware.
 *
 * This may be called in the context of device probe and therefore the PCI
 * device may be locked.
 *
 */
static void wcxb_hard_reset(struct wcxb *xb)
{
	if (wcxb_is_pcie(xb))
		_wcxb_pcie_hard_reset(xb);
	else
		_wcxb_hard_reset(xb);
}

int wcxb_init(struct wcxb *xb, const char *board_name, u32 int_mode)
{
	int res = 0;
	struct pci_dev *pdev = xb->pdev;
	u32 tdm_control;

	/* Validate the device before the first dereference. */
	WARN_ON(!pdev);
	if (!pdev)
		return -EINVAL;

	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

#ifdef WCXB_PCI_DEV_DOES_NOT_HAVE_IS_PCIE
	xb->flags.is_pcie = pci_find_capability(pdev, PCI_CAP_ID_EXP) ? 1 : 0;
#endif

	xb->latency = WCXB_DEFAULT_LATENCY;
	xb->max_latency = WCXB_DEFAULT_MAXLATENCY;

	spin_lock_init(&xb->lock);

	xb->membase = pci_iomap(pdev, 0, 0);
	if (!xb->membase) {
		dev_err(&xb->pdev->dev, "Unable to map BAR 0\n");
		return -EIO;
	}
	if (pci_request_regions(pdev, board_name))
		dev_info(&xb->pdev->dev, "Unable to request regions\n");

	wcxb_soft_reset(xb);

	res = wcxb_alloc_dring(xb, board_name);
	if (res) {
		dev_err(&xb->pdev->dev,
			"Failed to allocate descriptor rings.\n");
		goto fail_exit;
	}

	/* Enable writes to fpga status register */
	iowrite32be(0, xb->membase + 0x04);

	xb->flags.have_msi = (int_mode) ? 0 : (0 == pci_enable_msi(pdev));

	if (request_irq(pdev->irq, wcxb_isr,
			(xb->flags.have_msi) ? 0 : IRQF_SHARED,
			board_name, xb)) {
		dev_notice(&xb->pdev->dev, "Unable to request IRQ %d\n",
			   pdev->irq);
		res = -EIO;
		goto fail_exit;
	}

	iowrite32be(0, xb->membase + TDM_CONTROL);
	tdm_control = ioread32be(xb->membase + TDM_CONTROL);
	if (!(tdm_control & AUTHENTICATED)) {
		dev_err(&xb->pdev->dev,
			"This board is not authenticated and may not function properly.\n");
		msleep(1000);
	} else {
		dev_dbg(&xb->pdev->dev, "Authenticated. %08x\n", tdm_control);
	}

	return res;

fail_exit:
	pci_release_regions(xb->pdev);
	return res;
}
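
/*
 * Typical call sequence from a board driver (a sketch; "mydev" and its ops
 * table, which must supply handle_receive/handle_transmit, are hypothetical):
 *
 *	xb->pdev = pdev;
 *	xb->ops = &mydev_xb_ops;
 *	res = wcxb_init(xb, "mydev", 0);
 *	if (!res)
 *		wcxb_start(xb);
 *	...
 *	wcxb_release(xb);
 */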

void wcxb_stop_dma(struct wcxb *xb)
{
	unsigned long flags;
	u32 reg;

	/* Quiesce DMA engine interrupts */
	spin_lock_irqsave(&xb->lock, flags);
	reg = ioread32be(xb->membase + TDM_CONTROL);
	reg &= ~ENABLE_DMA;
	iowrite32be(reg, xb->membase + TDM_CONTROL);
	spin_unlock_irqrestore(&xb->lock, flags);
}

int wcxb_wait_for_stop(struct wcxb *xb, unsigned long timeout_ms)
{
	unsigned long stop;

	stop = jiffies + msecs_to_jiffies(timeout_ms);
	do {
		if (time_after(jiffies, stop))
			return -EIO;
		else
			cpu_relax();
	} while (!wcxb_is_stopped(xb));
	return 0;
}

void wcxb_disable_interrupts(struct wcxb *xb)
{
	iowrite32be(0, xb->membase + IER);
}

void wcxb_stop(struct wcxb *xb)
{
	unsigned long flags;

	spin_lock_irqsave(&xb->lock, flags);
	/* Stop everything */
	iowrite32be(0, xb->membase + TDM_CONTROL);
	iowrite32be(0, xb->membase + IER);
	iowrite32be(0, xb->membase + MER);
	iowrite32be(-1, xb->membase + IAR);
	/* Flush quiesce commands before exit */
	ioread32be(xb->membase);
	spin_unlock_irqrestore(&xb->lock, flags);
	synchronize_irq(xb->pdev->irq);
}

bool wcxb_is_stopped(struct wcxb *xb)
{
	return !(ioread32be(xb->membase + TDM_CONTROL) & DMA_RUNNING);
}

static void wcxb_free_dring(struct wcxb *xb)
{
	struct wcxb_meta_desc *mdesc;
	struct wcxb_hw_desc *hdesc;
	int i;

	/* Free tx/rx buffs */
	for (i = 0; i < DRING_SIZE; i++) {
		mdesc = &xb->meta_dring[i];
		hdesc = &xb->hw_dring[i];
		if (mdesc->tx_buf_virt) {
			dma_pool_free(xb->pool,
				      mdesc->tx_buf_virt,
				      be32_to_cpu(hdesc->tx_buf));
			dma_pool_free(xb->pool,
				      mdesc->rx_buf_virt,
				      be32_to_cpu(hdesc->rx_buf));
		}
	}
	dma_pool_destroy(xb->pool);
	dma_free_coherent(&xb->pdev->dev,
			  sizeof(struct wcxb_hw_desc) * DRING_SIZE,
			  xb->hw_dring,
			  xb->hw_dring_phys);
	kfree(xb->meta_dring);
}

void wcxb_release(struct wcxb *xb)
{
	wcxb_stop(xb);
	synchronize_irq(xb->pdev->irq);
	free_irq(xb->pdev->irq, xb);
	if (xb->flags.have_msi)
		pci_disable_msi(xb->pdev);
	if (xb->membase)
		pci_iounmap(xb->pdev, xb->membase);
	wcxb_free_dring(xb);
	pci_release_regions(xb->pdev);
	pci_disable_device(xb->pdev);
}

int wcxb_start(struct wcxb *xb)
{
	u32 reg;
	unsigned long flags;

	spin_lock_irqsave(&xb->lock, flags);
	_wcxb_reset_dring(xb);

	/* Enable hardware interrupts */
	iowrite32be(-1, xb->membase + IAR);
	iowrite32be(DESC_UNDERRUN|DESC_COMPLETE, xb->membase + IER);
	/* iowrite32be(0x3f7, xb->membase + IER); */
	iowrite32be(MER_ME|MER_HIE, xb->membase + MER);

	/* Start the DMA engine processing. */
	reg = ioread32be(xb->membase + TDM_CONTROL);
	reg |= ENABLE_DMA;
	iowrite32be(reg, xb->membase + TDM_CONTROL);

	spin_unlock_irqrestore(&xb->lock, flags);
	return 0;
}

struct wcxb_meta_block {
	__le32 chksum;
	__le32 version;
	__le32 size;
} __packed;

struct wcxb_firm_header {
	u8 header[6];
	__le32 chksum;
	u8 pad[18];
	__le32 version;
} __packed;
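
/*
 * Firmware images begin with the header above: a 6 byte "DIGIUM" magic, a
 * CRC32 over everything that follows the checksum field (byte offset 10
 * through the end of the file), and the image version. The flashable payload
 * starts immediately after the header; see wcxb_check_firmware() and
 * wcxb_update_firmware() below.
 */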

u32 wcxb_get_firmware_version(struct wcxb *xb)
{
	u32 version = 0;

	/* Two version registers are read and concatenated into one result:
	 * the firmware version goes in the upper 16 bits and the Microblaze
	 * version in the lower 16 bits. */
	version = ((ioread32be(xb->membase + 0x400) & 0xffff) << 16);
	version += ioread32be(xb->membase + 0x2018);
	return version;
}
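
/*
 * The composite version can be split back apart, e.g. (sketch):
 *
 *	u32 v = wcxb_get_firmware_version(xb);
 *	u16 fpga_version = (v >> 16) & 0xffff;
 *	u16 microblaze_version = v & 0xffff;
 */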

static int wcxb_update_firmware(struct wcxb *xb, const struct firmware *fw,
				const char *filename,
				enum wcxb_reset_option reset)
{
	u32 tdm_control;
	static const int APPLICATION_ADDRESS = 0x200000;
	static const int META_BLOCK_OFFSET = 0x170000;
	static const int ERASE_BLOCK_SIZE = 0x010000;
	static const int END_OFFSET = APPLICATION_ADDRESS + META_BLOCK_OFFSET +
		ERASE_BLOCK_SIZE;
	struct wcxb_spi_master *flash_spi_master;
	struct wcxb_spi_device *flash_spi_device;
	struct wcxb_meta_block meta;
	int offset;
	struct wcxb_firm_header *head = (struct wcxb_firm_header *)(fw->data);

	if (fw->size > (META_BLOCK_OFFSET + sizeof(*head))) {
		dev_err(&xb->pdev->dev,
			"Firmware is too large to fit in available space.\n");
		return -EINVAL;
	}

	meta.size = cpu_to_le32(fw->size);
	meta.version = head->version;
	meta.chksum = head->chksum;

	flash_spi_master = wcxb_spi_master_create(&xb->pdev->dev,
						  xb->membase + FLASH_SPI_BASE,
						  false);
	flash_spi_device = wcxb_spi_device_create(flash_spi_master, 0);

	dev_info(&xb->pdev->dev,
		 "Uploading %s. This can take up to 30 seconds.\n", filename);

	/* First erase all the blocks in the application area. */
	offset = APPLICATION_ADDRESS;
	while (offset < END_OFFSET) {
		wcxb_flash_sector_erase(flash_spi_device, offset);
		offset += ERASE_BLOCK_SIZE;
	}

	/* Then write the new firmware file. */
	wcxb_flash_write(flash_spi_device, APPLICATION_ADDRESS,
			 &fw->data[sizeof(struct wcxb_firm_header)],
			 fw->size - sizeof(struct wcxb_firm_header));

	/* Finally, update the meta block. */
	wcxb_flash_write(flash_spi_device,
			 APPLICATION_ADDRESS + META_BLOCK_OFFSET,
			 &meta, sizeof(meta));

	if (WCXB_RESET_NOW == reset) {
		/* Reset the fpga after loading the firmware. */
		dev_info(&xb->pdev->dev,
			 "Firmware load complete. Resetting device.\n");
		tdm_control = ioread32be(xb->membase + TDM_CONTROL);
		wcxb_hard_reset(xb);
		iowrite32be(0, xb->membase + 0x04);
		iowrite32be(tdm_control, xb->membase + TDM_CONTROL);
	} else {
		dev_info(&xb->pdev->dev,
			 "Delaying reset. Firmware load requires a power cycle.\n");
	}

	wcxb_spi_device_destroy(flash_spi_device);
	wcxb_spi_master_destroy(flash_spi_master);
	return 0;
}

int wcxb_check_firmware(struct wcxb *xb, const u32 expected_version,
			const char *firmware_filename, bool force_firmware,
			enum wcxb_reset_option reset)
{
	const struct firmware *fw;
	const struct wcxb_firm_header *header;
	static const int APPLICATION_ADDRESS = 0x200000;
	static const int META_BLOCK_OFFSET = 0x170000;
	struct wcxb_spi_master *flash_spi_master;
	struct wcxb_spi_device *flash_spi_device;
	struct wcxb_meta_block meta;
	int res = 0;
	u32 crc;
	u32 version = 0;

	version = wcxb_get_firmware_version(xb);

	if (0xff000000 == (version & 0xff000000)) {
		dev_info(&xb->pdev->dev,
			 "Invalid firmware %x. Please check your hardware.\n",
			 version);
		return -EIO;
	}

	if ((expected_version == version) && !force_firmware) {
		dev_info(&xb->pdev->dev, "Firmware version: %x\n", version);
		return 0;
	}

	/* Check the meta firmware version for a not-yet-booted application
	 * image. */
	flash_spi_master = wcxb_spi_master_create(&xb->pdev->dev,
						  xb->membase + FLASH_SPI_BASE,
						  false);
	flash_spi_device = wcxb_spi_device_create(flash_spi_master, 0);

	res = wcxb_flash_read(flash_spi_device,
			      APPLICATION_ADDRESS + META_BLOCK_OFFSET,
			      &meta, sizeof(meta));
	/* The SPI device and master are only needed for this read; destroy
	 * them now so the early returns below do not leak them. */
	wcxb_spi_device_destroy(flash_spi_device);
	wcxb_spi_master_destroy(flash_spi_master);
	if (res) {
		dev_info(&xb->pdev->dev, "Unable to read flash\n");
		return -EIO;
	}

	if ((meta.version == cpu_to_le32(expected_version))
			&& !force_firmware) {
		dev_info(&xb->pdev->dev,
			 "Detected previous firmware updated to current version %x, but %x is currently running on card. You likely need to power cycle your system.\n",
			 expected_version, version);
		return 0;
	}

	if (force_firmware) {
		dev_info(&xb->pdev->dev,
			 "force_firmware module parameter is set. Forcing firmware load, regardless of version.\n");
	} else {
		dev_info(&xb->pdev->dev,
			 "Firmware version %x is running, but we require version %x.\n",
			 version, expected_version);
	}

	res = request_firmware(&fw, firmware_filename, &xb->pdev->dev);
	if (res) {
		dev_info(&xb->pdev->dev,
			 "Firmware '%s' not available from userspace.\n",
			 firmware_filename);
		goto cleanup;
	}

	header = (const struct wcxb_firm_header *)fw->data;

	/* Check the CRC, which covers everything after the 6 byte magic and
	 * 4 byte checksum fields. */
	crc = crc32(~0, &fw->data[10], fw->size - 10) ^ ~0;
	if (memcmp("DIGIUM", header->header, sizeof(header->header)) ||
	    (le32_to_cpu(header->chksum) != crc)) {
		dev_info(&xb->pdev->dev,
			 "%s is invalid. Please reinstall.\n",
			 firmware_filename);
		goto cleanup;
	}

	/* Check the file against the required firmware version. */
	if (le32_to_cpu(header->version) != expected_version) {
		dev_err(&xb->pdev->dev,
			"Existing firmware file %s is version %x, but we require %x. Please install the correct firmware file.\n",
			firmware_filename, le32_to_cpu(header->version),
			expected_version);
		res = -EIO;
		goto cleanup;
	}

	dev_info(&xb->pdev->dev, "Found %s (version: %x) Preparing for flash\n",
		 firmware_filename, le32_to_cpu(header->version));

	res = wcxb_update_firmware(xb, fw, firmware_filename, reset);

	version = wcxb_get_firmware_version(xb);
	if (WCXB_RESET_NOW == reset) {
		dev_info(&xb->pdev->dev,
			 "Reset into firmware version: %x\n", version);
	} else {
		dev_info(&xb->pdev->dev,
			 "Running firmware version: %x\n", version);
		dev_info(&xb->pdev->dev,
			 "Loaded firmware version: %x (Will load after next power cycle)\n",
			 le32_to_cpu(header->version));
	}

	if ((WCXB_RESET_NOW == reset) && (expected_version != version)
			&& !force_firmware) {
		/* On the off chance that the interface is in a state where it
		 * cannot boot into the updated firmware image, power cycling
		 * the card can recover. A simple "reset" of the computer is
		 * not sufficient; power has to be removed completely. */
		dev_err(&xb->pdev->dev,
			"The wrong firmware is running after update. Please power cycle and try again.\n");
		res = -EIO;
		goto cleanup;
	}

	if (res) {
		dev_info(&xb->pdev->dev,
			 "Failed to load firmware %s\n", firmware_filename);
	}

cleanup:
	release_firmware(fw);
	return res;
}