s3c-pl330.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245
  1. /* linux/arch/arm/plat-samsung/s3c-pl330.c
  2. *
  3. * Copyright (C) 2010 Samsung Electronics Co. Ltd.
  4. * Jaswinder Singh <jassi.brar@samsung.com>
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License, or
  9. * (at your option) any later version.
  10. */
  11. #include <linux/init.h>
  12. #include <linux/module.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/io.h>
  15. #include <linux/slab.h>
  16. #include <linux/platform_device.h>
  17. #include <linux/clk.h>
  18. #include <linux/err.h>
  19. #include <asm/hardware/pl330.h>
  20. #include <plat/s3c-pl330-pdata.h>
/**
 * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
 * @busy_chan: Number of channels currently busy.
 * @peri: List of IDs of peripherals this DMAC can work with.
 * @node: To attach to the global list of DMACs.
 * @pi: PL330 configuration info for the DMAC.
 * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
 * @clk: Pointer of DMAC operation clock.
 */
struct s3c_pl330_dmac {
	unsigned busy_chan;
	enum dma_ch *peri;
	struct list_head node;
	struct pl330_info *pi;
	struct kmem_cache *kmcache;
	struct clk *clk;
};
/**
 * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
 * @token: Xfer ID provided by the client.
 * @node: To attach to the list of xfers on a channel.
 * @px: Xfer for PL330 core.
 * @chan: Owner channel of this xfer.
 */
struct s3c_pl330_xfer {
	void *token;
	struct list_head node;
	struct pl330_xfer px;
	struct s3c_pl330_chan *chan;
};
/**
 * struct s3c_pl330_chan - Logical channel to communicate with
 * a Physical peripheral.
 * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
 * NULL if the channel is available to be acquired.
 * @id: ID of the peripheral that this channel can communicate with.
 * @options: Options specified by the client.
 * @sdaddr: Address provided via s3c2410_dma_devconfig.
 * @node: To attach to the global list of channels.
 * @lrq: Pointer to the last submitted pl330_req to PL330 core.
 * @xfer_list: To manage list of xfers enqueued.
 * @req: Two requests to communicate with the PL330 engine.
 * @callback_fn: Callback function to the client.
 * @rqcfg: Channel configuration for the xfers.
 * @xfer_head: Pointer to the xfer to be next executed.
 * @dmac: Pointer to the DMAC that manages this channel, NULL if the
 * channel is available to be acquired.
 * @client: Client of this channel. NULL if the
 * channel is available to be acquired.
 */
struct s3c_pl330_chan {
	void *pl330_chan_id;
	enum dma_ch id;
	unsigned int options;
	unsigned long sdaddr;
	struct list_head node;
	struct pl330_req *lrq;
	struct list_head xfer_list;
	struct pl330_req req[2];	/* ping-pong pair for the PL330 core */
	s3c2410_dma_cbfn_t callback_fn;
	struct pl330_reqcfg rqcfg;
	struct s3c_pl330_xfer *xfer_head;
	struct s3c_pl330_dmac *dmac;
	struct s3c2410_dma_client *client;
};
/* All DMACs in the platform */
static LIST_HEAD(dmac_list);

/* All channels to peripherals in the platform */
static LIST_HEAD(chan_list);

/*
 * Since we add resources(DMACs and Channels) to the global pool,
 * we need to guard access to the resources using a global lock
 */
static DEFINE_SPINLOCK(res_lock);
  95. /* Returns the channel with ID 'id' in the chan_list */
  96. static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
  97. {
  98. struct s3c_pl330_chan *ch;
  99. list_for_each_entry(ch, &chan_list, node)
  100. if (ch->id == id)
  101. return ch;
  102. return NULL;
  103. }
  104. /* Allocate a new channel with ID 'id' and add to chan_list */
  105. static void chan_add(const enum dma_ch id)
  106. {
  107. struct s3c_pl330_chan *ch = id_to_chan(id);
  108. /* Return if the channel already exists */
  109. if (ch)
  110. return;
  111. ch = kmalloc(sizeof(*ch), GFP_KERNEL);
  112. /* Return silently to work with other channels */
  113. if (!ch)
  114. return;
  115. ch->id = id;
  116. ch->dmac = NULL;
  117. list_add_tail(&ch->node, &chan_list);
  118. }
  119. /* If the channel is not yet acquired by any client */
  120. static bool chan_free(struct s3c_pl330_chan *ch)
  121. {
  122. if (!ch)
  123. return false;
  124. /* Channel points to some DMAC only when it's acquired */
  125. return ch->dmac ? false : true;
  126. }
  127. /*
  128. * Returns 0 is peripheral i/f is invalid or not present on the dmac.
  129. * Index + 1, otherwise.
  130. */
  131. static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
  132. {
  133. enum dma_ch *id = dmac->peri;
  134. int i;
  135. /* Discount invalid markers */
  136. if (ch_id == DMACH_MAX)
  137. return 0;
  138. for (i = 0; i < PL330_MAX_PERI; i++)
  139. if (id[i] == ch_id)
  140. return i + 1;
  141. return 0;
  142. }
  143. /* If all channel threads of the DMAC are busy */
  144. static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
  145. {
  146. struct pl330_info *pi = dmac->pi;
  147. return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
  148. }
/*
 * Returns the number of free channels that
 * can be handled by this dmac only.
 */
static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
{
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;
	struct s3c_pl330_chan *ch;
	unsigned found, count = 0;
	enum dma_ch p;
	int i;

	for (i = 0; i < PL330_MAX_PERI; i++) {
		p = id[i];
		ch = id_to_chan(p);

		/* Skip invalid slots and channels already acquired */
		if (p == DMACH_MAX || !chan_free(ch))
			continue;

		/* Can any other DMAC also reach this peripheral? */
		found = 0;
		list_for_each_entry(d, &dmac_list, node) {
			if (d != dmac && iface_of_dmac(d, ch->id)) {
				found = 1;
				break;
			}
		}

		/* No other DMAC can serve it - it is exclusive to 'dmac' */
		if (!found)
			count++;
	}

	return count;
}
/*
 * Measure of suitability of 'dmac' handling 'ch'
 *
 * 0 indicates 'dmac' can not handle 'ch' either
 * because it is not supported by the hardware or
 * because all dmac channels are currently busy.
 *
 * >0 value indicates 'dmac' has the capability.
 * The bigger the value the more suitable the dmac.
 */
#define MAX_SUIT	UINT_MAX
#define MIN_SUIT	0

static unsigned suitablility(struct s3c_pl330_dmac *dmac,
		struct s3c_pl330_chan *ch)
{
	struct pl330_info *pi = dmac->pi;
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;
	unsigned s;
	int i;

	s = MIN_SUIT;

	/* If all the DMAC channel threads are busy */
	if (dmac_busy(dmac))
		return s;

	for (i = 0; i < PL330_MAX_PERI; i++)
		if (id[i] == ch->id)
			break;

	/* If the 'dmac' can't talk to 'ch' */
	if (i == PL330_MAX_PERI)
		return s;

	s = MAX_SUIT;
	list_for_each_entry(d, &dmac_list, node) {
		/*
		 * If some other dmac can talk to this
		 * peri and has some channel free.
		 */
		if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
			s = 0;
			break;
		}
	}

	/* s still MAX_SUIT: 'dmac' is the only non-busy DMAC for 'ch' */
	if (s)
		return s;

	/* Shared peripheral: base score, adjusted by spare capacity */
	s = 100;

	/* Good if free chans are more, bad otherwise */
	s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);

	return s;
}
  226. /* More than one DMAC may have capability to transfer data with the
  227. * peripheral. This function assigns most suitable DMAC to manage the
  228. * channel and hence communicate with the peripheral.
  229. */
  230. static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
  231. {
  232. struct s3c_pl330_dmac *d, *dmac = NULL;
  233. unsigned sn, sl = MIN_SUIT;
  234. list_for_each_entry(d, &dmac_list, node) {
  235. sn = suitablility(d, ch);
  236. if (sn == MAX_SUIT)
  237. return d;
  238. if (sn > sl)
  239. dmac = d;
  240. }
  241. return dmac;
  242. }
  243. /* Acquire the channel for peripheral 'id' */
  244. static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
  245. {
  246. struct s3c_pl330_chan *ch = id_to_chan(id);
  247. struct s3c_pl330_dmac *dmac;
  248. /* If the channel doesn't exist or is already acquired */
  249. if (!ch || !chan_free(ch)) {
  250. ch = NULL;
  251. goto acq_exit;
  252. }
  253. dmac = map_chan_to_dmac(ch);
  254. /* If couldn't map */
  255. if (!dmac) {
  256. ch = NULL;
  257. goto acq_exit;
  258. }
  259. dmac->busy_chan++;
  260. ch->dmac = dmac;
  261. acq_exit:
  262. return ch;
  263. }
/* Delete xfer from the queue, fixing up xfer_head if it pointed here */
static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
{
	struct s3c_pl330_xfer *t;
	struct s3c_pl330_chan *ch;
	int found;

	if (!xfer)
		return;

	ch = xfer->chan;

	/* Make sure xfer is in the queue */
	found = 0;
	list_for_each_entry(t, &ch->xfer_list, node)
		if (t == xfer) {
			found = 1;
			break;
		}

	if (!found)
		return;

	/* If xfer is last entry in the queue */
	if (xfer->node.next == &ch->xfer_list)
		/* wrap around: successor is the first list entry */
		t = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		t = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	/* If there was only one node left */
	if (t == xfer)
		ch->xfer_head = NULL;
	else if (ch->xfer_head == xfer)
		/* head moves on to the (circular) successor */
		ch->xfer_head = t;

	list_del(&xfer->node);
}
/* Provides pointer to the next xfer in the queue.
 * If CIRCULAR option is set, the list is left intact,
 * otherwise the xfer is removed from the list.
 * Forced delete 'pluck' can be set to override the CIRCULAR option.
 */
static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
		int pluck)
{
	struct s3c_pl330_xfer *xfer = ch->xfer_head;

	if (!xfer)
		return NULL;

	/* Advance xfer_head, wrapping to the list start at the tail */
	if (xfer->node.next == &ch->xfer_list)
		ch->xfer_head = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		ch->xfer_head = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
		del_from_queue(xfer);

	return xfer;
}
/* Append 'xfer' to the channel queue; 'front' makes it the new head */
static inline void add_to_queue(struct s3c_pl330_chan *ch,
		struct s3c_pl330_xfer *xfer, int front)
{
	struct pl330_xfer *xt;

	/* If queue empty */
	if (ch->xfer_head == NULL)
		ch->xfer_head = xfer;

	xt = &ch->xfer_head->px;
	/* If the head already submitted (CIRCULAR head) */
	if (ch->options & S3C2410_DMAF_CIRCULAR &&
		(xt == ch->req[0].x || xt == ch->req[1].x))
		ch->xfer_head = xfer;

	/* If this is a resubmission, it should go at the head */
	if (front) {
		ch->xfer_head = xfer;
		list_add(&xfer->node, &ch->xfer_list);
	} else {
		list_add_tail(&xfer->node, &ch->xfer_list);
	}
}
  338. static inline void _finish_off(struct s3c_pl330_xfer *xfer,
  339. enum s3c2410_dma_buffresult res, int ffree)
  340. {
  341. struct s3c_pl330_chan *ch;
  342. if (!xfer)
  343. return;
  344. ch = xfer->chan;
  345. /* Do callback */
  346. if (ch->callback_fn)
  347. ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);
  348. /* Force Free or if buffer is not needed anymore */
  349. if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
  350. kmem_cache_free(ch->dmac->kmcache, xfer);
  351. }
/*
 * Pull the next queued xfer for 'ch' and hand it to the PL330 core via
 * request 'r'. Returns 0 on success (or if 'r' already carries an xfer),
 * negative error otherwise. Caller is expected to hold res_lock.
 */
static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
		struct pl330_req *r)
{
	struct s3c_pl330_xfer *xfer;
	int ret = 0;

	/* If already submitted */
	if (r->x)
		return 0;

	xfer = get_from_queue(ch, 0);
	if (xfer) {
		r->x = &xfer->px;

		/* Use max bandwidth for M<->M xfers */
		if (r->rqtype == MEMTOMEM) {
			struct pl330_info *pi = xfer->chan->dmac->pi;
			int burst = 1 << ch->rqcfg.brst_size;
			u32 bytes = r->x->bytes;
			int bl;

			/* Start from the largest burst length the FIFO allows */
			bl = pi->pcfg.data_bus_width / 8;
			bl *= pi->pcfg.data_buf_dep;
			bl /= burst;

			/* src/dst_burst_len can't be more than 16 */
			if (bl > 16)
				bl = 16;

			/* Shrink until the xfer size divides evenly */
			while (bl > 1) {
				if (!(bytes % (bl * burst)))
					break;
				bl--;
			}

			ch->rqcfg.brst_len = bl;
		} else {
			ch->rqcfg.brst_len = 1;
		}

		ret = pl330_submit_req(ch->pl330_chan_id, r);

		/* If submission was successful */
		if (!ret) {
			ch->lrq = r; /* latest submitted req */
			return 0;
		}

		r->x = NULL;

		/* If both of the PL330 ping-pong buffers filled */
		if (ret == -EAGAIN) {
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			/* Queue back again */
			add_to_queue(ch, xfer, 1);
			ret = 0;
		} else {
			/* Hard failure: complete the xfer with an error */
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			_finish_off(xfer, S3C2410_RES_ERR, 0);
		}
	}

	return ret;
}
/*
 * Common completion path for both ping-pong requests: resubmit the next
 * queued xfer on 'r', then report the finished xfer to the client.
 */
static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
		struct pl330_req *r, enum pl330_op_err err)
{
	unsigned long flags;
	struct s3c_pl330_xfer *xfer;
	struct pl330_xfer *xl = r->x;
	enum s3c2410_dma_buffresult res;

	spin_lock_irqsave(&res_lock, flags);

	/* Recycle 'r' for the next xfer before dropping the lock */
	r->x = NULL;

	s3c_pl330_submit(ch, r);

	spin_unlock_irqrestore(&res_lock, flags);

	/* Map result to S3C DMA API */
	if (err == PL330_ERR_NONE)
		res = S3C2410_RES_OK;
	else if (err == PL330_ERR_ABORT)
		res = S3C2410_RES_ABORT;
	else
		res = S3C2410_RES_ERR;

	/* If last request had some xfer */
	if (xl) {
		xfer = container_of(xl, struct s3c_pl330_xfer, px);
		_finish_off(xfer, res, 0);
	} else {
		dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
			__func__, __LINE__);
	}
}
  433. static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
  434. {
  435. struct pl330_req *r = token;
  436. struct s3c_pl330_chan *ch = container_of(r,
  437. struct s3c_pl330_chan, req[0]);
  438. s3c_pl330_rq(ch, r, err);
  439. }
  440. static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
  441. {
  442. struct pl330_req *r = token;
  443. struct s3c_pl330_chan *ch = container_of(r,
  444. struct s3c_pl330_chan, req[1]);
  445. s3c_pl330_rq(ch, r, err);
  446. }
  447. /* Release an acquired channel */
  448. static void chan_release(struct s3c_pl330_chan *ch)
  449. {
  450. struct s3c_pl330_dmac *dmac;
  451. if (chan_free(ch))
  452. return;
  453. dmac = ch->dmac;
  454. ch->dmac = NULL;
  455. dmac->busy_chan--;
  456. }
/*
 * Execute control operation 'op' on the channel of peripheral 'id'.
 * Returns 0 on success, -EINVAL on a bad channel or unknown op,
 * otherwise the result of pl330_chan_ctrl().
 */
int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
{
	struct s3c_pl330_xfer *xfer;
	enum pl330_chan_op pl330op;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int idx, ret;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);
	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto ctrl_exit;
	}

	switch (op) {
	case S3C2410_DMAOP_START:
		/* Make sure both reqs are enqueued */
		idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
		s3c_pl330_submit(ch, &ch->req[idx]);
		s3c_pl330_submit(ch, &ch->req[1 - idx]);
		pl330op = PL330_OP_START;
		break;

	case S3C2410_DMAOP_STOP:
		pl330op = PL330_OP_ABORT;
		break;

	case S3C2410_DMAOP_FLUSH:
		pl330op = PL330_OP_FLUSH;
		break;

	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_TIMEOUT:
	case S3C2410_DMAOP_STARTED:
		/* Not implemented - accepted as a no-op */
		spin_unlock_irqrestore(&res_lock, flags);
		return 0;

	default:
		spin_unlock_irqrestore(&res_lock, flags);
		return -EINVAL;
	}

	ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);

	/* START needs no teardown of in-flight requests */
	if (pl330op == PL330_OP_START) {
		spin_unlock_irqrestore(&res_lock, flags);
		return ret;
	}

	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	/* Abort the current xfer */
	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		/* Drop xfer during FLUSH */
		if (pl330op == PL330_OP_FLUSH)
			del_from_queue(xfer);

		ch->req[idx].x = NULL;

		/* Lock dropped so the client callback runs unlocked */
		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT,
				pl330op == PL330_OP_FLUSH ? 1 : 0);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Flush the whole queue */
	if (pl330op == PL330_OP_FLUSH) {

		if (ch->req[1 - idx].x) {
			xfer = container_of(ch->req[1 - idx].x,
					struct s3c_pl330_xfer, px);

			del_from_queue(xfer);

			ch->req[1 - idx].x = NULL;

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);
		}

		/* Finish off the remaining in the queue */
		xfer = ch->xfer_head;
		while (xfer) {

			del_from_queue(xfer);

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);

			xfer = ch->xfer_head;
		}
	}

ctrl_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);
/*
 * Queue a transfer of 'size' bytes at 'addr' for the channel of
 * peripheral 'id'. 'token' is returned to the client in the callback.
 * Returns 0 on success, -EINVAL on bad channel/size, -ENOMEM on OOM.
 */
int s3c2410_dma_enqueue(enum dma_ch id, void *token,
			dma_addr_t addr, int size)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int idx, ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	/* Error if invalid or free channel */
	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	/* Error if size is unaligned */
	if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
	if (!xfer) {
		ret = -ENOMEM;
		goto enq_exit;
	}

	xfer->token = token;
	xfer->chan = ch;
	xfer->px.bytes = size;
	xfer->px.next = NULL; /* Single request */

	/* For S3C DMA API, direction is always fixed for all xfers */
	if (ch->req[0].rqtype == MEMTODEV) {
		xfer->px.src_addr = addr;
		xfer->px.dst_addr = ch->sdaddr;
	} else {
		xfer->px.src_addr = ch->sdaddr;
		xfer->px.dst_addr = addr;
	}

	add_to_queue(ch, xfer, 0);

	/* Try submitting on either request */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (!ch->req[idx].x)
		s3c_pl330_submit(ch, &ch->req[idx]);
	else
		s3c_pl330_submit(ch, &ch->req[1 - idx]);

	spin_unlock_irqrestore(&res_lock, flags);

	/* NOTE(review): ch->options is read here after res_lock is
	 * dropped - presumably safe since clients set options before
	 * enqueueing; confirm against callers.
	 */
	if (ch->options & S3C2410_DMAF_AUTOSTART)
		s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);

	return 0;

enq_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue);
/*
 * Acquire the channel of peripheral 'id' for 'client' and set sane
 * defaults. Returns 0 on success, -EBUSY if the channel or a hardware
 * thread could not be acquired. 'dev' is unused here.
 */
int s3c2410_dma_request(enum dma_ch id,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c_pl330_dmac *dmac;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = chan_acquire(id);
	if (!ch) {
		ret = -EBUSY;
		goto req_exit;
	}

	dmac = ch->dmac;

	ch->pl330_chan_id = pl330_request_channel(dmac->pi);
	if (!ch->pl330_chan_id) {
		chan_release(ch);
		ret = -EBUSY;
		goto req_exit;
	}

	ch->client = client;
	ch->options = 0; /* Clear any option */
	ch->callback_fn = NULL; /* Clear any callback */
	ch->lrq = NULL;

	ch->rqcfg.brst_size = 2; /* Default word size */
	ch->rqcfg.swap = SWAP_NO;
	ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.privileged = 0;
	ch->rqcfg.insnaccess = 0;

	/* Set invalid direction */
	ch->req[0].rqtype = DEVTODEV;
	ch->req[1].rqtype = ch->req[0].rqtype;

	/* Both ping-pong requests share the same channel config */
	ch->req[0].cfg = &ch->rqcfg;
	ch->req[1].cfg = ch->req[0].cfg;

	ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
	ch->req[1].peri = ch->req[0].peri;

	ch->req[0].token = &ch->req[0];
	ch->req[0].xfer_cb = s3c_pl330_rq0;
	ch->req[1].token = &ch->req[1];
	ch->req[1].xfer_cb = s3c_pl330_rq1;
	ch->req[0].x = NULL;
	ch->req[1].x = NULL;

	/* Reset xfer list */
	INIT_LIST_HEAD(&ch->xfer_list);
	ch->xfer_head = NULL;

req_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_request);
/*
 * Release the channel of peripheral 'id'. All pending xfers are
 * aborted with client callbacks. Returns 0 on success or if the
 * channel was already free, -EBUSY if owned by a different client.
 */
int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int ret = 0;
	unsigned idx;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);
	if (!ch || chan_free(ch))
		goto free_exit;

	/* Refuse if someone else wanted to free the channel */
	if (ch->client != client) {
		ret = -EBUSY;
		goto free_exit;
	}

	/* Stop any active xfer, Flush the queue and do callbacks */
	pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);

	/* Abort the submitted requests */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[idx].x = NULL;
		del_from_queue(xfer);

		/* Lock dropped so the client callback runs unlocked */
		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	if (ch->req[1 - idx].x) {
		xfer = container_of(ch->req[1 - idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[1 - idx].x = NULL;
		del_from_queue(xfer);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Pluck and Abort the queued requests in order */
	do {
		xfer = get_from_queue(ch, 1);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	} while (xfer);

	ch->client = NULL;

	pl330_release_channel(ch->pl330_chan_id);

	ch->pl330_chan_id = NULL;

	chan_release(ch);

free_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_free);
  697. int s3c2410_dma_config(enum dma_ch id, int xferunit)
  698. {
  699. struct s3c_pl330_chan *ch;
  700. struct pl330_info *pi;
  701. unsigned long flags;
  702. int i, dbwidth, ret = 0;
  703. spin_lock_irqsave(&res_lock, flags);
  704. ch = id_to_chan(id);
  705. if (!ch || chan_free(ch)) {
  706. ret = -EINVAL;
  707. goto cfg_exit;
  708. }
  709. pi = ch->dmac->pi;
  710. dbwidth = pi->pcfg.data_bus_width / 8;
  711. /* Max size of xfer can be pcfg.data_bus_width */
  712. if (xferunit > dbwidth) {
  713. ret = -EINVAL;
  714. goto cfg_exit;
  715. }
  716. i = 0;
  717. while (xferunit != (1 << i))
  718. i++;
  719. /* If valid value */
  720. if (xferunit == (1 << i))
  721. ch->rqcfg.brst_size = i;
  722. else
  723. ret = -EINVAL;
  724. cfg_exit:
  725. spin_unlock_irqrestore(&res_lock, flags);
  726. return ret;
  727. }
  728. EXPORT_SYMBOL(s3c2410_dma_config);
  729. /* Options that are supported by this driver */
  730. #define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)
  731. int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
  732. {
  733. struct s3c_pl330_chan *ch;
  734. unsigned long flags;
  735. int ret = 0;
  736. spin_lock_irqsave(&res_lock, flags);
  737. ch = id_to_chan(id);
  738. if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
  739. ret = -EINVAL;
  740. else
  741. ch->options = options;
  742. spin_unlock_irqrestore(&res_lock, flags);
  743. return 0;
  744. }
  745. EXPORT_SYMBOL(s3c2410_dma_setflags);
  746. int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
  747. {
  748. struct s3c_pl330_chan *ch;
  749. unsigned long flags;
  750. int ret = 0;
  751. spin_lock_irqsave(&res_lock, flags);
  752. ch = id_to_chan(id);
  753. if (!ch || chan_free(ch))
  754. ret = -EINVAL;
  755. else
  756. ch->callback_fn = rtn;
  757. spin_unlock_irqrestore(&res_lock, flags);
  758. return ret;
  759. }
  760. EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
  761. int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
  762. unsigned long address)
  763. {
  764. struct s3c_pl330_chan *ch;
  765. unsigned long flags;
  766. int ret = 0;
  767. spin_lock_irqsave(&res_lock, flags);
  768. ch = id_to_chan(id);
  769. if (!ch || chan_free(ch)) {
  770. ret = -EINVAL;
  771. goto devcfg_exit;
  772. }
  773. switch (source) {
  774. case S3C2410_DMASRC_HW: /* P->M */
  775. ch->req[0].rqtype = DEVTOMEM;
  776. ch->req[1].rqtype = DEVTOMEM;
  777. ch->rqcfg.src_inc = 0;
  778. ch->rqcfg.dst_inc = 1;
  779. break;
  780. case S3C2410_DMASRC_MEM: /* M->P */
  781. ch->req[0].rqtype = MEMTODEV;
  782. ch->req[1].rqtype = MEMTODEV;
  783. ch->rqcfg.src_inc = 1;
  784. ch->rqcfg.dst_inc = 0;
  785. break;
  786. default:
  787. ret = -EINVAL;
  788. goto devcfg_exit;
  789. }
  790. ch->sdaddr = address;
  791. devcfg_exit:
  792. spin_unlock_irqrestore(&res_lock, flags);
  793. return ret;
  794. }
  795. EXPORT_SYMBOL(s3c2410_dma_devconfig);
  796. int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
  797. {
  798. struct s3c_pl330_chan *ch = id_to_chan(id);
  799. struct pl330_chanstatus status;
  800. int ret;
  801. if (!ch || chan_free(ch))
  802. return -EINVAL;
  803. ret = pl330_chan_status(ch->pl330_chan_id, &status);
  804. if (ret < 0)
  805. return ret;
  806. *src = status.src_addr;
  807. *dst = status.dst_addr;
  808. return 0;
  809. }
  810. EXPORT_SYMBOL(s3c2410_dma_getposition);
  811. static irqreturn_t pl330_irq_handler(int irq, void *data)
  812. {
  813. if (pl330_update(data))
  814. return IRQ_HANDLED;
  815. else
  816. return IRQ_NONE;
  817. }
  818. static int pl330_probe(struct platform_device *pdev)
  819. {
  820. struct s3c_pl330_dmac *s3c_pl330_dmac;
  821. struct s3c_pl330_platdata *pl330pd;
  822. struct pl330_info *pl330_info;
  823. struct resource *res;
  824. int i, ret, irq;
  825. pl330pd = pdev->dev.platform_data;
  826. /* Can't do without the list of _32_ peripherals */
  827. if (!pl330pd || !pl330pd->peri) {
  828. dev_err(&pdev->dev, "platform data missing!\n");
  829. return -ENODEV;
  830. }
  831. pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
  832. if (!pl330_info)
  833. return -ENOMEM;
  834. pl330_info->pl330_data = NULL;
  835. pl330_info->dev = &pdev->dev;
  836. res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  837. if (!res) {
  838. ret = -ENODEV;
  839. goto probe_err1;
  840. }
  841. request_mem_region(res->start, resource_size(res), pdev->name);
  842. pl330_info->base = ioremap(res->start, resource_size(res));
  843. if (!pl330_info->base) {
  844. ret = -ENXIO;
  845. goto probe_err2;
  846. }
  847. irq = platform_get_irq(pdev, 0);
  848. if (irq < 0) {
  849. ret = irq;
  850. goto probe_err3;
  851. }
  852. ret = request_irq(irq, pl330_irq_handler, 0,
  853. dev_name(&pdev->dev), pl330_info);
  854. if (ret)
  855. goto probe_err4;
  856. /* Allocate a new DMAC */
  857. s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
  858. if (!s3c_pl330_dmac) {
  859. ret = -ENOMEM;
  860. goto probe_err5;
  861. }
  862. /* Get operation clock and enable it */
  863. s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma");
  864. if (IS_ERR(s3c_pl330_dmac->clk)) {
  865. dev_err(&pdev->dev, "Cannot get operation clock.\n");
  866. ret = -EINVAL;
  867. goto probe_err6;
  868. }
  869. clk_enable(s3c_pl330_dmac->clk);
  870. ret = pl330_add(pl330_info);
  871. if (ret)
  872. goto probe_err7;
  873. /* Hook the info */
  874. s3c_pl330_dmac->pi = pl330_info;
  875. /* No busy channels */
  876. s3c_pl330_dmac->busy_chan = 0;
  877. s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
  878. sizeof(struct s3c_pl330_xfer), 0, 0, NULL);
  879. if (!s3c_pl330_dmac->kmcache) {
  880. ret = -ENOMEM;
  881. goto probe_err8;
  882. }
  883. /* Get the list of peripherals */
  884. s3c_pl330_dmac->peri = pl330pd->peri;
  885. /* Attach to the list of DMACs */
  886. list_add_tail(&s3c_pl330_dmac->node, &dmac_list);
  887. /* Create a channel for each peripheral in the DMAC
  888. * that is, if it doesn't already exist
  889. */
  890. for (i = 0; i < PL330_MAX_PERI; i++)
  891. if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
  892. chan_add(s3c_pl330_dmac->peri[i]);
  893. printk(KERN_INFO
  894. "Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
  895. printk(KERN_INFO
  896. "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
  897. pl330_info->pcfg.data_buf_dep,
  898. pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
  899. pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);
  900. return 0;
  901. probe_err8:
  902. pl330_del(pl330_info);
  903. probe_err7:
  904. clk_disable(s3c_pl330_dmac->clk);
  905. clk_put(s3c_pl330_dmac->clk);
  906. probe_err6:
  907. kfree(s3c_pl330_dmac);
  908. probe_err5:
  909. free_irq(irq, pl330_info);
  910. probe_err4:
  911. probe_err3:
  912. iounmap(pl330_info->base);
  913. probe_err2:
  914. release_mem_region(res->start, resource_size(res));
  915. probe_err1:
  916. kfree(pl330_info);
  917. return ret;
  918. }
  919. static int pl330_remove(struct platform_device *pdev)
  920. {
  921. struct s3c_pl330_dmac *dmac, *d;
  922. struct s3c_pl330_chan *ch;
  923. unsigned long flags;
  924. int del, found;
  925. if (!pdev->dev.platform_data)
  926. return -EINVAL;
  927. spin_lock_irqsave(&res_lock, flags);
  928. found = 0;
  929. list_for_each_entry(d, &dmac_list, node)
  930. if (d->pi->dev == &pdev->dev) {
  931. found = 1;
  932. break;
  933. }
  934. if (!found) {
  935. spin_unlock_irqrestore(&res_lock, flags);
  936. return 0;
  937. }
  938. dmac = d;
  939. /* Remove all Channels that are managed only by this DMAC */
  940. list_for_each_entry(ch, &chan_list, node) {
  941. /* Only channels that are handled by this DMAC */
  942. if (iface_of_dmac(dmac, ch->id))
  943. del = 1;
  944. else
  945. continue;
  946. /* Don't remove if some other DMAC has it too */
  947. list_for_each_entry(d, &dmac_list, node)
  948. if (d != dmac && iface_of_dmac(d, ch->id)) {
  949. del = 0;
  950. break;
  951. }
  952. if (del) {
  953. spin_unlock_irqrestore(&res_lock, flags);
  954. s3c2410_dma_free(ch->id, ch->client);
  955. spin_lock_irqsave(&res_lock, flags);
  956. list_del(&ch->node);
  957. kfree(ch);
  958. }
  959. }
  960. /* Disable operation clock */
  961. clk_disable(dmac->clk);
  962. clk_put(dmac->clk);
  963. /* Remove the DMAC */
  964. list_del(&dmac->node);
  965. kfree(dmac);
  966. spin_unlock_irqrestore(&res_lock, flags);
  967. return 0;
  968. }
/* Platform glue: binds pl330_probe/pl330_remove to devices named "s3c-pl330". */
static struct platform_driver pl330_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "s3c-pl330",
	},
	.probe = pl330_probe,
	.remove = pl330_remove,
};
/* Module entry: register the platform driver; pl330_probe() then runs
 * once per matching "s3c-pl330" device. */
static int __init pl330_init(void)
{
	return platform_driver_register(&pl330_driver);
}
module_init(pl330_init);
  982. static void __exit pl330_exit(void)
  983. {
  984. platform_driver_unregister(&pl330_driver);
  985. return;
  986. }
  987. module_exit(pl330_exit);
/* Module metadata. */
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
MODULE_LICENSE("GPL");