dv-bfin_dma.c

/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2015 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "config.h"

#include "sim-main.h"
#include "devices.h"
#include "hw-device.h"
#include "dv-bfin_dma.h"
#include "dv-bfin_dmac.h"
/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */
struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
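/* The channel's MMR block is backed directly by the struct above:
   mmr_base() is the struct offset of the first register (NEXT_DESC_PTR)
   and mmr_offset() turns a struct member into its MMR offset, so the
   register handlers below can index straight into the structure.  */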
#define mmr_base()      offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())

static const char * const mmr_names[] =
{
  "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
  "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
  "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
};
#define mmr_name(off) mmr_names[(off) / 4]
static bool
bfin_dma_enabled (struct bfin_dma *dma)
{
  return (dma->config & DMAEN);
}

static bool
bfin_dma_running (struct bfin_dma *dma)
{
  return (dma->irq_status & DMA_RUN);
}

static struct hw *
bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
{
  if (dma->hw_peer)
    return dma->hw_peer;
  return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
}
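/* Load the channel's working state for the next transfer: pick the element
   size from WDSIZE, sanity check the start address and stride, fetch a
   descriptor from memory when the DMAFLOW mode calls for one, and reload
   the CURR_* registers from the (possibly new) NEXT_DESC_PTR, START_ADDR,
   and counts.  */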
static void
bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
{
  bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
  bu16 _flows[9], *flows = _flows;

  HW_TRACE ((me, "dma starting up %#x", dma->config));

  switch (dma->config & WDSIZE)
    {
    case WDSIZE_32:
      dma->ele_size = 4;
      break;
    case WDSIZE_16:
      dma->ele_size = 2;
      break;
    default:
      dma->ele_size = 1;
      break;
    }

  /* Address has to be a multiple of transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;

  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
              dma->config, dma->x_modify);

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
        hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
        hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
        hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
        hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }

  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
        &dma->sal,
        &dma->sah,
        &dma->config,
        &dma->x_count,
        (void *) &dma->x_modify,
        &dma->y_count,
        (void *) &dma->y_modify,
      };

      switch (dma->config & DMAFLOW)
        {
        case DMAFLOW_LARGE:
          dma->ndph = _flows[1];
          --ndsize;
          ++flows;
          /* Fall through -- LARGE descriptors also carry NDPL.  */
        case DMAFLOW_SMALL:
          dma->ndpl = _flows[0];
          --ndsize;
          ++flows;
          break;
        }

      for (idx = 0; idx < ndsize; ++idx)
        *stores[idx] = flows[idx];
    }

  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}
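/* One row (X count) has been exhausted.  Raise the DI interrupt port when
   requested, step to the next row for 2D transfers, and otherwise either
   stop the channel (DMAFLOW_STOP) or load the next descriptor/autobuffer.
   Returns non-zero when there is more work queued on this channel.  */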
static int
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);

  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return 1;
    }

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return 0;
    default:
      bfin_dma_process_desc (me, dma);
      return 1;
    }
}

static void bfin_dma_hw_event_callback (struct hw *, void *);
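/* (Re)arm the transfer pump: cancel any pending event and, when DELAY is
   non-zero, schedule bfin_dma_hw_event_callback to run again after that
   many ticks.  A zero DELAY simply stops the channel's polling.  */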
static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);
  if (dma->handler)
    {
      hw_event_queue_deschedule (me, dma->handler);
      dma->handler = NULL;
    }
  if (!delay)
    return;
  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
                                          bfin_dma_hw_event_callback, dma);
}
/* Chew through the DMA over and over: move up to one buffer's worth of
   data between the peer device and simulated memory, advance the current
   address/count by however much actually transferred, and reschedule until
   the transfer completes, stalls, or hits an error.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;

  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);
  ret = 0;
  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = MIN (sizeof (buf), dma->curr_x_count * dma->ele_size);

  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled ?  abort for now.  */
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
        goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
        goto reschedule;
    }

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);

  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}
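/* CPU (MMR) writes to the channel.  Most registers are only writable while
   the channel is idle; IRQ_STATUS is write-1-to-clear for DMA_DONE/DMA_ERR;
   and writing CONFIG with DMAEN set marks the channel running, loads the
   first descriptor, and kicks off the transfer loop when this channel is
   the one driving the transfer.  */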
static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
                          address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_WRITE ();

  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW ?  The sim
          simply discards atm ...  */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
        {
          if (nr_bytes == 4)
            *value32p = value;
          else
            *value16p = value;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
        *value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
        {
          *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
          /* Clear peripheral peer so it gets looked up again.  */
          dma->hw_peer = NULL;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel ?  */
      if (nr_bytes == 4)
        *value32p = value;
      else
        *value16p = value;

      if (bfin_dma_enabled (dma))
        {
          dma->irq_status |= DMA_RUN;
          bfin_dma_process_desc (me, dma);
          /* The writer is the master.  */
          if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
            bfin_dma_reschedule (me, 1);
        }
      else
        {
          dma->irq_status &= ~DMA_RUN;
          bfin_dma_reschedule (me, 0);
        }
      break;
    case mmr_offset(irq_status):
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
        *value16p = value;
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      break;
    }

  return nr_bytes;
}
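/* CPU (MMR) reads of the channel: served straight out of the backing
   structure, which mirrors the hardware register layout.  */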
static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
                         address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}
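/* DMA fabric read: a peer pulls NR_BYTES out of this channel's current
   buffer.  Only whole elements are consumed; the current address and X
   count advance by the amount actually read, and the row is finished once
   the count drains to zero.  */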
static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
                          unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}
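/* DMA fabric write: the mirror image of the read handler above -- a peer
   pushes data into this channel, which lands at the current address and
   advances the X count by the number of whole elements written.  */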
static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
                           int space, unsigned_word addr,
                           unsigned nr_bytes,
                           int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}
static const struct hw_port_descriptor bfin_dma_ports[] =
{
  { "di", 0, 0, output_port, },	/* DMA Interrupt */
  { NULL, 0, 0, 0, },
};
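/* Map the channel's MMR block into the parent bus: parse the "reg"
   property, convert it to an attach address/size, insist on the expected
   BFIN_MMR_DMA_SIZE, and remember the base so register offsets can be
   computed later.  */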
static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
                                     &reg.address,
                                     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
                     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}
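/* Device "finish" (instantiation) hook: allocate the per-channel state,
   hook up the MMR and DMA buffer handlers plus the "di" interrupt port,
   attach the register block, and seed PERIPHERAL_MAP with the parent DMA
   controller's default mapping for this channel.  */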
static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}
const struct hw_descriptor dv_bfin_dma_descriptor[] =
{
  {"bfin_dma", bfin_dma_finish,},
  {NULL, NULL},
};