// SPDX-License-Identifier: GPL-2.0-only
/*
 * Omnitek Scatter-Gather DMA Controller
 *
 * Copyright 2012-2015 Cisco Systems, Inc. and/or its affiliates.
 * All rights reserved.
 */

#include <linux/string.h>
#include <linux/io.h>
#include <linux/pci_regs.h>
#include <linux/spinlock.h>

#include "cobalt-driver.h"
#include "cobalt-omnitek.h"

/* descriptor */
#define END_OF_CHAIN		(1 << 1)
#define INTERRUPT_ENABLE	(1 << 2)
#define WRITE_TO_PCI		(1 << 3)
#define READ_FROM_PCI		(0 << 3)
#define DESCRIPTOR_FLAG_MSK	(END_OF_CHAIN | INTERRUPT_ENABLE | WRITE_TO_PCI)
#define NEXT_ADRS_MSK		0xffffffe0

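/*
 * The flag bits above are carried in the low bits of each descriptor's
 * next-pointer and stripped off with NEXT_ADRS_MSK before the address is
 * used; this works presumably because descriptors must be 32-byte aligned.
 */
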
/* control/status register */
#define ENABLE			(1 << 0)
#define START			(1 << 1)
#define ABORT			(1 << 2)
#define DONE			(1 << 4)
#define SG_INTERRUPT		(1 << 5)
#define EVENT_INTERRUPT		(1 << 6)
#define SCATTER_GATHER_MODE	(1 << 8)
#define DISABLE_VIDEO_RESYNC	(1 << 9)
#define EVENT_INTERRUPT_ENABLE	(1 << 10)
#define DIRECTIONAL_MSK		(3 << 16)
#define INPUT_ONLY		(0 << 16)
#define OUTPUT_ONLY		(1 << 16)
#define BIDIRECTIONAL		(2 << 16)
#define DMA_TYPE_MEMORY		(0 << 18)
#define DMA_TYPE_FIFO		(1 << 18)

#define BASE			(cobalt->bar0)
#define CAPABILITY_HEADER	(BASE)
#define CAPABILITY_REGISTER	(BASE + 0x04)
#define PCI_64BIT		(1 << 8)
#define LOCAL_64BIT		(1 << 9)
#define INTERRUPT_STATUS	(BASE + 0x08)
#define PCI(c)			(BASE + 0x40 + ((c) * 0x40))
#define SIZE(c)			(BASE + 0x58 + ((c) * 0x40))
#define DESCRIPTOR(c)		(BASE + 0x50 + ((c) * 0x40))
#define CS_REG(c)		(BASE + 0x60 + ((c) * 0x40))
#define BYTES_TRANSFERRED(c)	(BASE + 0x64 + ((c) * 0x40))

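/*
 * Each DMA channel has its own 0x40-byte register block starting at
 * BASE + 0x40, so the per-channel macros above offset by (c) * 0x40.
 */
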
static char *get_dma_direction(u32 status)
{
	switch (status & DIRECTIONAL_MSK) {
	case INPUT_ONLY: return "Input";
	case OUTPUT_ONLY: return "Output";
	case BIDIRECTIONAL: return "Bidirectional";
	}
	return "";
}

static void show_dma_capability(struct cobalt *cobalt)
{
	u32 header = ioread32(CAPABILITY_HEADER);
	u32 capa = ioread32(CAPABILITY_REGISTER);
	u32 i;

	cobalt_info("Omnitek DMA capability: ID 0x%02x Version 0x%02x Next 0x%x Size 0x%x\n",
		    header & 0xff, (header >> 8) & 0xff,
		    (header >> 16) & 0xffff, (capa >> 24) & 0xff);

	switch ((capa >> 8) & 0x3) {
	case 0:
		cobalt_info("Omnitek DMA: 32 bits PCIe and Local\n");
		break;
	case 1:
		cobalt_info("Omnitek DMA: 64 bits PCIe, 32 bits Local\n");
		break;
	case 3:
		cobalt_info("Omnitek DMA: 64 bits PCIe and Local\n");
		break;
	}

	for (i = 0; i < (capa & 0xf); i++) {
		u32 status = ioread32(CS_REG(i));

		cobalt_info("Omnitek DMA channel #%d: %s %s\n", i,
			    status & DMA_TYPE_FIFO ? "FIFO" : "MEMORY",
			    get_dma_direction(status));
	}
}

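/*
 * Program the channel with the 64-bit bus address of the first descriptor
 * (high word first, low word with the flag bits masked off) and start the
 * transfer in scatter-gather mode.
 */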
void omni_sg_dma_start(struct cobalt_stream *s, struct sg_dma_desc_info *desc)
{
	struct cobalt *cobalt = s->cobalt;

	iowrite32((u32)((u64)desc->bus >> 32), DESCRIPTOR(s->dma_channel) + 4);
	iowrite32((u32)desc->bus & NEXT_ADRS_MSK, DESCRIPTOR(s->dma_channel));
	iowrite32(ENABLE | SCATTER_GATHER_MODE | START, CS_REG(s->dma_channel));
}

bool is_dma_done(struct cobalt_stream *s)
{
	struct cobalt *cobalt = s->cobalt;

	if (ioread32(CS_REG(s->dma_channel)) & DONE)
		return true;

	return false;
}

void omni_sg_dma_abort_channel(struct cobalt_stream *s)
{
	struct cobalt *cobalt = s->cobalt;

	if (!is_dma_done(s))
		iowrite32(ABORT, CS_REG(s->dma_channel));
}

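/*
 * Read the capability register: the low nibble is the number of DMA
 * channels and PCI_64BIT says whether the core can generate 64-bit PCIe
 * addresses. Any channel left running from a previous session is aborted.
 * The hardware appears to enumerate MEMORY channels before FIFO channels,
 * so counting the MEMORY ones gives the index of the first FIFO channel.
 */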
int omni_sg_dma_init(struct cobalt *cobalt)
{
	u32 capa = ioread32(CAPABILITY_REGISTER);
	int i;

	cobalt->first_fifo_channel = 0;
	cobalt->dma_channels = capa & 0xf;
	if (capa & PCI_64BIT)
		cobalt->pci_32_bit = false;
	else
		cobalt->pci_32_bit = true;

	for (i = 0; i < cobalt->dma_channels; i++) {
		u32 status = ioread32(CS_REG(i));
		u32 ctrl = ioread32(CS_REG(i));

		if (!(ctrl & DONE))
			iowrite32(ABORT, CS_REG(i));

		if (!(status & DMA_TYPE_FIFO))
			cobalt->first_fifo_channel++;
	}

	show_dma_capability(cobalt);
	return 0;
}

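/*
 * Build the descriptor chain for a transfer of 'size' bytes. 'width' and
 * 'stride' presumably describe a video line and its pitch: after every
 * 'width' bytes the scatterlist is advanced to the next 'stride' boundary,
 * so any padding between lines is skipped rather than transferred. The
 * last descriptor loops back to the first and has INTERRUPT_ENABLE set.
 */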
int descriptor_list_create(struct cobalt *cobalt,
		struct scatterlist *scatter_list, bool to_pci, unsigned sglen,
		unsigned size, unsigned width, unsigned stride,
		struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = (struct sg_dma_descriptor *)desc->virt;
	dma_addr_t next = desc->bus;
	unsigned offset = 0;
	unsigned copy_bytes = width;
	unsigned copied = 0;
	bool first = true;

	/* Must be 4-byte aligned */
	WARN_ON(sg_dma_address(scatter_list) & 3);
	WARN_ON(size & 3);
	WARN_ON(next & 3);
	WARN_ON(stride & 3);
	WARN_ON(stride < width);
	if (width >= stride)
		copy_bytes = stride = size;

	while (size) {
		dma_addr_t addr = sg_dma_address(scatter_list) + offset;
		unsigned bytes;

		if (addr == 0)
			return -EFAULT;
		if (cobalt->pci_32_bit) {
			WARN_ON((u64)addr >> 32);
			if ((u64)addr >> 32)
				return -EFAULT;
		}

		/* PCIe address */
		d->pci_l = addr & 0xffffffff;
		/* If dma_addr_t is 32 bits, then addr >> 32 is actually the
		 * equivalent of addr >> 0 in gcc. So must cast to u64.
		 */
		d->pci_h = (u64)addr >> 32;

		/* Sync to start of streaming frame */
		d->local = 0;
		d->reserved0 = 0;

		/* Transfer bytes */
		bytes = min(sg_dma_len(scatter_list) - offset,
			    copy_bytes - copied);

		if (first) {
			if (to_pci)
				d->local = 0x11111111;
			first = false;
			if (sglen == 1) {
				/* Make sure there are always at least two
				 * descriptors */
				d->bytes = (bytes / 2) & ~3;
				d->reserved1 = 0;
				size -= d->bytes;
				copied += d->bytes;
				offset += d->bytes;
				addr += d->bytes;
				next += sizeof(struct sg_dma_descriptor);
				d->next_h = (u32)((u64)next >> 32);
				d->next_l = (u32)next |
					(to_pci ? WRITE_TO_PCI : 0);
				bytes -= d->bytes;
				d++;
				/* PCIe address */
				d->pci_l = addr & 0xffffffff;
				/* If dma_addr_t is 32 bits, then addr >> 32
				 * is actually the equivalent of addr >> 0 in
				 * gcc. So must cast to u64.
				 */
				d->pci_h = (u64)addr >> 32;

				/* Sync to start of streaming frame */
				d->local = 0;
				d->reserved0 = 0;
			}
		}

		d->bytes = bytes;
		d->reserved1 = 0;
		size -= bytes;
		copied += bytes;
		offset += bytes;

		if (copied == copy_bytes) {
			while (copied < stride) {
				bytes = min(sg_dma_len(scatter_list) - offset,
					    stride - copied);
				copied += bytes;
				offset += bytes;
				size -= bytes;
				if (sg_dma_len(scatter_list) == offset) {
					offset = 0;
					scatter_list = sg_next(scatter_list);
				}
			}
			copied = 0;
		} else {
			offset = 0;
			scatter_list = sg_next(scatter_list);
		}

		/* Next descriptor + control bits */
		next += sizeof(struct sg_dma_descriptor);
		if (size == 0) {
			/* Loopback to the first descriptor */
			d->next_h = (u32)((u64)desc->bus >> 32);
			d->next_l = (u32)desc->bus |
				(to_pci ? WRITE_TO_PCI : 0) | INTERRUPT_ENABLE;
			if (!to_pci)
				d->local = 0x22222222;
			desc->last_desc_virt = d;
		} else {
			d->next_h = (u32)((u64)next >> 32);
			d->next_l = (u32)next | (to_pci ? WRITE_TO_PCI : 0);
		}
		d++;
	}
	return 0;
}

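/*
 * Chain 'this' to 'next' by rewriting the last descriptor's next-pointer,
 * or terminate the chain if 'next' is NULL. The WRITE_TO_PCI bit already
 * present in the pointer is preserved so the direction stays the same.
 */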
void descriptor_list_chain(struct sg_dma_desc_info *this,
			   struct sg_dma_desc_info *next)
{
	struct sg_dma_descriptor *d = this->last_desc_virt;
	u32 direction = d->next_l & WRITE_TO_PCI;

	if (next == NULL) {
		d->next_h = 0;
		d->next_l = direction | INTERRUPT_ENABLE | END_OF_CHAIN;
	} else {
		d->next_h = (u32)((u64)next->bus >> 32);
		d->next_l = (u32)next->bus | direction | INTERRUPT_ENABLE;
	}
}

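/*
 * The helpers below manage the descriptor list itself. A rough lifecycle,
 * as an illustrative sketch only (the real callers live elsewhere in the
 * cobalt driver and do proper error handling):
 *
 *	if (descriptor_list_allocate(&desc, bytes) == NULL)
 *		return -ENOMEM;
 *	descriptor_list_create(cobalt, sglist, to_pci, sglen,
 *			       size, width, stride, &desc);
 *	omni_sg_dma_start(s, &desc);
 *	...
 *	omni_sg_dma_abort_channel(s);
 *	descriptor_list_free(&desc);
 */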
void *descriptor_list_allocate(struct sg_dma_desc_info *desc, size_t bytes)
{
	desc->size = bytes;
	desc->virt = dma_alloc_coherent(desc->dev, bytes,
					&desc->bus, GFP_KERNEL);
	return desc->virt;
}

void descriptor_list_free(struct sg_dma_desc_info *desc)
{
	if (desc->virt)
		dma_free_coherent(desc->dev, desc->size,
				  desc->virt, desc->bus);
	desc->virt = NULL;
}

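/*
 * The remaining helpers all patch the chain's last descriptor in place:
 * toggling INTERRUPT_ENABLE, looping the chain back to its first
 * descriptor, or marking it as the end of the chain.
 */
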
void descriptor_list_interrupt_enable(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_l |= INTERRUPT_ENABLE;
}

void descriptor_list_interrupt_disable(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_l &= ~INTERRUPT_ENABLE;
}

void descriptor_list_loopback(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_h = (u32)((u64)desc->bus >> 32);
	d->next_l = (u32)desc->bus | (d->next_l & DESCRIPTOR_FLAG_MSK);
}

void descriptor_list_end_of_chain(struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = desc->last_desc_virt;

	d->next_l |= END_OF_CHAIN;
}