/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"
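
/*
 * Small index helpers: the frame list always has FRLISTEN_64_SIZE (64)
 * entries, and descriptor list indices wrap at MAX_DMA_DESC_NUM_HS_ISOC for
 * high-speed isochronous QHs or MAX_DMA_DESC_NUM_GENERIC for everything
 * else. All of these sizes are powers of two, so wrapping is done with a
 * simple mask.
 */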
static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}

static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
		(qh->host_interval + 8 - 1) / 8 : qh->host_interval;
}
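
/*
 * Allocates the descriptor list for a QH from the matching kmem cache
 * (high-speed isochronous QHs get the larger list), maps it for DMA, and
 * allocates the n_bytes[] array used to remember the transfer size that was
 * programmed into each descriptor.
 */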
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	struct kmem_cache *desc_cache;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC
	    && qh->dev_speed == USB_SPEED_HIGH)
		desc_cache = hsotg->desc_hsisoc_cache;
	else
		desc_cache = hsotg->desc_gen_cache;

	qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
						dwc2_max_desc_num(qh);

	qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
	if (!qh->desc_list)
		return -ENOMEM;

	qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
					   qh->desc_list_sz,
					   DMA_TO_DEVICE);

	qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags);
	if (!qh->n_bytes) {
		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
				 qh->desc_list_sz,
				 DMA_FROM_DEVICE);
		kmem_cache_free(desc_cache, qh->desc_list);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	struct kmem_cache *desc_cache;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC
	    && qh->dev_speed == USB_SPEED_HIGH)
		desc_cache = hsotg->desc_hsisoc_cache;
	else
		desc_cache = hsotg->desc_gen_cache;

	if (qh->desc_list) {
		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
				 qh->desc_list_sz, DMA_FROM_DEVICE);
		kmem_cache_free(desc_cache, qh->desc_list);
		qh->desc_list = NULL;
	}

	kfree(qh->n_bytes);
	qh->n_bytes = NULL;
}
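
/*
 * The frame list is shared by all periodic QHs: one 32-bit entry per frame,
 * with each bit enabling one host channel in that frame. It is allocated
 * once, when the first periodic QH is initialized, and mapped for DMA so the
 * controller can fetch it.
 */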
static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	if (hsotg->frame_list)
		return 0;

	hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
	hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
	if (!hsotg->frame_list)
		return -ENOMEM;

	hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
					       hsotg->frame_list_sz,
					       DMA_TO_DEVICE);

	return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
			 hsotg->frame_list_sz, DMA_FROM_DEVICE);

	kfree(hsotg->frame_list);
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
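
/*
 * Periodic scheduling is enabled by programming the frame list DMA address
 * into HFLBADDR and setting the frame list length and HCFG_PERSCHEDENA in
 * HCFG; it is disabled by clearing HCFG_PERSCHEDENA again. Both paths are
 * no-ops if the schedule is already in the requested state.
 */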
static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg->regs + HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	dwc2_writel(hsotg->frame_list_dma, hsotg->regs + HFLBADDR);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	dwc2_writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg->regs + HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	dwc2_writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		pr_err("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->next_active_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	/*
	 * Sync frame list since controller will access it if periodic
	 * channel is currently enabled.
	 */
	dma_sync_single_for_device(hsotg->dev,
				   hsotg->frame_list_dma,
				   hsotg->frame_list_sz,
				   DMA_TO_DEVICE);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->host_interval - 1) / qh->host_interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->host_interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}
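
/*
 * Worked example for the schinfo computation above, assuming schinfo is the
 * per-microframe schedule bitmap programmed into the channel: a high-speed
 * endpoint with host_interval = 2 gives inc = (8 + 2 - 1) / 2 = 4 loop
 * iterations, setting bits 0, 2, 4 and 6, i.e. schinfo = 0x55, so the
 * channel is serviced in every other microframe of the frame.
 */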

static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->core_params->uframe_sched > 0)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
		hsotg->available_host_channels++;
	}

	/*
	 * The condition is added to prevent a double cleanup attempt in case
	 * of device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
		       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 * @mem_flags: Indicates the type of memory allocation
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to free
 *
 * Frees descriptor list memory associated with the QH. If the QH is the last
 * periodic one, also frees the FrameList memory and disables periodic
 * scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned long flags;

	dwc2_desc_list_free(hsotg, qh);

	/*
	 * The channel may still be assigned here. This has been seen on
	 * isochronous URB dequeue: the channel is halted but no subsequent
	 * ChHalted interrupt arrives to release it, so when the endpoint
	 * disable routine gets here the channel is still assigned.
	 */
	spin_lock_irqsave(&hsotg->lock, flags);
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->core_params->uframe_sched > 0 ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}
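
/*
 * Maps a frame list index to a descriptor list index. For high-speed
 * isochronous QHs each frame owns a set of 8 descriptors (one per
 * microframe), so the returned index is 8-aligned; otherwise the mapping is
 * one-to-one, wrapped to the generic descriptor list size.
 */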
static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}

/*
 * Determine the starting frame for an Isochronous transfer.
 * A few frames are skipped to prevent a race condition with the HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/*
	 * next_active_frame is always frame number (not uFrame) both in FS
	 * and HS!
	 */

	/*
	 * skip_frames is used to limit the number of activated descriptors,
	 * to avoid the situation where the HC services the last activated
	 * descriptor first.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since HC always fetches
	 * the descriptor corresponding to curr_frame+1, the descriptor
	 * corresponding to frame 2 will be fetched. If the number of
	 * descriptors is max=64 (or greater) the list will be fully programmed
	 * with Active descriptors and it is possible (though rare) that the
	 * latest descriptor (considering rollback) corresponding to frame 2
	 * will be serviced first. The HS case is more probable because, in
	 * fact, up to 11 uframes (16 in the code) may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider uframe counter also, to start xfer asap. If half of
		 * the frame elapsed skip 2 frames otherwise just 1 frame.
		 * Starting descriptor index must be 8-aligned, so if the
		 * current frame is near to complete the next one is skipped as
		 * well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}

/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With the current ISOC processing algorithm the channel is released
	 * when there are no more QTDs in the list (qh->ntd == 0). Thus this
	 * function is called only when qh->ntd == 0 and qh->channel == 0.
	 *
	 * So the qh->channel != NULL branch is not used; it is kept in the
	 * source file because it is required for another possible approach:
	 * do not disable and release the channel when the ISOC session
	 * completes, just move the QH to the inactive schedule until a new
	 * QTD arrives. On a new QTD, the QH is moved back to the 'ready'
	 * schedule, and the starting frame and therefore the starting
	 * desc_index are recalculated. In that case the channel is released
	 * only on ep_disable.
	 */

	/*
	 * Calculate starting descriptor index. For INTERRUPT endpoint it is
	 * always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList
		 * current bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->next_active_frame) -
			  fr_idx_tmp) % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
								 &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4
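
/*
 * Fills one isochronous DMA descriptor: the buffer address and length come
 * from the next unscheduled isoc packet of the QTD's URB, the length is
 * capped at max_xfer_size, the Active bit is set, and (with
 * ISOC_URB_GIVEBACK_ASAP) IOC is requested on the descriptor for the URB's
 * last packet. The descriptor is then synced for the device.
 */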
static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

	/* Set active bit */
	dma_desc->status |= HOST_DMA_A;

	qh->ntd++;
	qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for each descriptor corresponding to last frame of URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (idx * sizeof(struct dwc2_hcd_dma_desc)),
				   sizeof(struct dwc2_hcd_dma_desc),
				   DMA_TO_DEVICE);
}

static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc = 0, ntd_max = 0;
	u16 cur_idx;
	u16 next_idx;

	idx = qh->td_last;
	inc = qh->host_interval;
	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);

	/*
	 * Ensure the current frame number has not overstepped the last
	 * scheduled descriptor. If it has, the only way to recover is to move
	 * qh->td_last to current frame number + 1, so that the next isoc
	 * descriptor is scheduled on frame number + 1 and not on a past
	 * frame.
	 */
	if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
		if (inc < 32) {
			dev_vdbg(hsotg->dev,
				 "current frame number overstep last descriptor\n");
			qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
							    qh->dev_speed);
			idx = qh->td_last;
		}
	}

	if (qh->host_interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
				qh->host_interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->host_interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		if (qtd->in_process &&
		    qtd->isoc_frame_index_last ==
		    qtd->urb->packet_count)
			continue;

		qtd->isoc_td_first = idx;
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->isoc_td_last = idx;
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;
		dma_sync_single_for_device(hsotg->dev,
					   qh->desc_list_dma + (idx *
					   sizeof(struct dwc2_hcd_dma_desc)),
					   sizeof(struct dwc2_hcd_dma_desc),
					   DMA_TO_DEVICE);
	}
#else
	/*
	 * Set IOC bit only for one descriptor. Always try to be ahead of HW
	 * processing, i.e. on IOC generation driver activates next descriptor
	 * but core continues to process descriptors following the one with IOC
	 * set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but only one was queued at activation time. Actually more
		 * than one QTD might be in the list if this function is
		 * called from XferCompletion - QTDs were queued during HW
		 * processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either number of
		 * descriptors is not greater than threshold or no more new
		 * descriptors activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (idx * sizeof(struct dwc2_hcd_dma_desc)),
				   sizeof(struct dwc2_hcd_dma_desc),
				   DMA_TO_DEVICE);
#endif
}
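
/*
 * Fills one non-isochronous DMA descriptor. The length is capped at
 * MAX_DMA_DESC_SIZE less (max_packet - 1) so that rounding an IN transfer up
 * to a whole number of packets still fits in one descriptor, and
 * chan->xfer_dma/xfer_len are advanced so the caller can chain further
 * descriptors for the remainder of the transfer.
 */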
static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

	if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
		len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	dma_sync_single_for_device(hsotg->dev,
				   qh->desc_list_dma +
				   (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
				   sizeof(struct dwc2_hcd_dma_desc),
				   DMA_TO_DEVICE);

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;
		chan->xfer_len -= len;
	}
}

static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
	 * if the SG transfer consists of multiple URBs, this pointer is
	 * re-assigned to the buffer of the currently processed QTD. For a
	 * non-SG request there is always one QTD active.
	 */
	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = qtd->urb->dma +
					qtd->urb->actual_length;
			chan->xfer_len = qtd->urb->length -
					qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma,
				 chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
				dma_sync_single_for_device(hsotg->dev,
					qh->desc_list_dma +
					((n_desc - 1) *
					 sizeof(struct dwc2_hcd_dma_desc)),
					sizeof(struct dwc2_hcd_dma_desc),
					DMA_TO_DEVICE);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;

		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;

		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
				HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		dma_sync_single_for_device(hsotg->dev,
					   qh->desc_list_dma + (n_desc - 1) *
					   sizeof(struct dwc2_hcd_dma_desc),
					   sizeof(struct dwc2_hcd_dma_desc),
					   DMA_TO_DEVICE);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
			dma_sync_single_for_device(hsotg->dev,
					qh->desc_list_dma,
					sizeof(struct dwc2_hcd_dma_desc),
					DMA_TO_DEVICE);
		}
		chan->ntd = n_desc;
	}
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 *
 * For Control and Bulk endpoints, initializes descriptor list and starts the
 * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
 * list then updates FrameList, marking appropriate entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on XferComplete
 * interrupt so it remains assigned to the endpoint (QH) until the session is
 * done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max, instead of actual size. Otherwise
			 * ntd will be changed with channel being enabled. Not
			 * recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}

#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2
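
/*
 * Completes one isochronous descriptor: reads back its status, fills in the
 * corresponding iso_descs[] entry of the URB (actual_length and status), and
 * gives the URB back once all of its packets are done. Returns
 * DWC2_CMPL_DONE when the QTD is finished, DWC2_CMPL_STOP when a descriptor
 * with IOC set is reached, a negative value to abort the scan, and 0
 * otherwise.
 */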
static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc;
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
				sizeof(struct dwc2_hcd_dma_desc)),
				sizeof(struct dwc2_hcd_dma_desc),
				DMA_FROM_DEVICE);

	dma_desc = &qh->desc_list[idx];

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from urb complete callback (sound driver for example). All
		 * pending URBs are dequeued there, so no need for further
		 * processing.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}
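
/*
 * Walks the isochronous descriptor list starting at qh->td_first and
 * completes descriptors for each in-process QTD. URB dequeue and fatal
 * errors (AHB error, babble) are handled up front; otherwise the scan
 * normally stops at the descriptor that raised IOC or when the list wraps
 * back to the starting index.
 */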
static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * The channel is halted in these error cases, which are
		 * considered serious issues.
		 * Complete all URBs marking all frames as failed, irrespective
		 * of whether some of the descriptors (frames) succeeded.
		 * Pass the error code to the completion routine as well, to
		 * update urb->status; some class drivers might use it to stop
		 * queuing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		if (!qtd->in_process)
			break;

		/*
		 * Ensure idx corresponds to descriptor where first urb of this
		 * qtd was added. In fact, during isoc desc init, dwc2 may skip
		 * an index if current frame number is already over this index.
		 */
		if (idx != qtd->isoc_td_first) {
			dev_vdbg(hsotg->dev,
				 "try to complete %d instead of %d\n",
				 idx, qtd->isoc_td_first);
			idx = qtd->isoc_td_first;
		}

		do {
			struct dwc2_qtd *qtd_next;
			u16 cur_idx;

			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
						    chan->speed);
			if (!rc)
				continue;

			if (rc == DWC2_CMPL_DONE)
				break;

			/* rc == DWC2_CMPL_STOP */

			if (qh->host_interval >= 32)
				goto stop_scan;

			qh->td_first = idx;
			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
			qtd_next = list_first_entry(&qh->qtd_list,
						    struct dwc2_qtd,
						    qtd_list_entry);
			if (dwc2_frame_idx_num_gt(cur_idx,
						  qtd_next->isoc_td_last))
				break;

			goto stop_scan;

		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}
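
/*
 * Updates URB state from one completed non-isochronous descriptor:
 * accumulates actual_length (using the remaining-byte count from the
 * descriptor status for IN transfers), maps descriptor or halt errors to an
 * URB status, and sets *xfer_done when the transfer has finished. Returns
 * nonzero if the transfer failed and must be completed with an error status.
 */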
static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_hcd_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For Control Data stage do not set urb->status
				 * to 0, to prevent URB callback. Set it when
				 * Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}

static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	dma_sync_single_for_cpu(hsotg->dev,
				qh->desc_list_dma + (desc_num *
				sizeof(struct dwc2_hcd_dma_desc)),
				sizeof(struct dwc2_hcd_dma_desc),
				DMA_FROM_DEVICE);

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
			 failed, *xfer_done);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 " Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 " Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
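
/*
 * Scans the descriptors of every QTD queued on the channel's QH and
 * completes finished URBs. Afterwards the data toggle is saved (or reset to
 * DATA0 on a stall) for bulk and interrupt endpoints, and the PING state is
 * set if the last transaction was NYETed.
 */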
static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct list_head *qtd_item, *qtd_tmp;
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
		int i;
		int qtd_desc_count;

		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
		xfer_done = 0;
		qtd_desc_count = qtd->n_desc;

		for (i = 0; i < qtd_desc_count; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done)) {
				qtd = NULL;
				goto stop_scan;
			}

			desc_num++;
		}
	}

stop_scan:
	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
 * status and calls completion routine for the URB if it's done. Called from
 * interrupt handlers.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @chan: Host channel the transfer is completed on
 * @chnum: Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of an Isochronous endpoint the channel is not halted until the end
 * of the session, i.e. until the QTD list is empty.
 * If a periodic channel is released, the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    list_empty(&qh->qtd_list)) {
			struct dwc2_qtd *qtd, *qtd_tmp;

			/*
			 * Kill all remaining QTDs since channel has been
			 * halted.
			 */
			list_for_each_entry_safe(qtd, qtd_tmp,
						 &qh->qtd_list,
						 qtd_list_entry) {
				dwc2_host_complete(hsotg, qtd,
						   -ECONNRESET);
				dwc2_hcd_qtd_unlink_and_free(hsotg,
							     qtd, qh);
			}

			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			list_move_tail(&qh->qh_list_entry,
				       &hsotg->periodic_sched_assigned);
			/*
			 * If channel has been halted during giveback of urb
			 * then prevent any new scheduling.
			 */
			if (!chan->halt_status)
				continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!list_empty(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}