gr_udc.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * USB Peripheral Controller driver for Aeroflex Gaisler GRUSBDC.
 *
 * 2013 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRUSBDC USB Device Controller cores available in the
 * GRLIB VHDL IP core library.
 *
 * Full documentation of the GRUSBDC core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * Contributors:
 * - Andreas Larsson <andreas@gaisler.com>
 * - Marko Isomaki
 */

/*
 * A GRUSBDC core can have up to 16 IN endpoints and 16 OUT endpoints each
 * individually configurable to any of the four USB transfer types. This driver
 * only supports cores in DMA mode.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/byteorder.h>

#include "gr_udc.h"

#define DRIVER_NAME	"gr_udc"
#define DRIVER_DESC	"Aeroflex Gaisler GRUSBDC USB Peripheral Controller"

static const char driver_name[] = DRIVER_NAME;
static const char driver_desc[] = DRIVER_DESC;

#define gr_read32(x) (ioread32be((x)))
#define gr_write32(x, v) (iowrite32be((v), (x)))
/* USB speed and corresponding string calculated from status register value */
#define GR_SPEED(status) \
	((status & GR_STATUS_SP) ? USB_SPEED_FULL : USB_SPEED_HIGH)
#define GR_SPEED_STR(status) usb_speed_string(GR_SPEED(status))

/* Size of hardware buffer calculated from epctrl register value */
#define GR_BUFFER_SIZE(epctrl) \
	((((epctrl) & GR_EPCTRL_BUFSZ_MASK) >> GR_EPCTRL_BUFSZ_POS) * \
	 GR_EPCTRL_BUFSZ_SCALER)
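
/*
 * Worked example of the macro above (field values illustrative only): if the
 * BUFSZ field extracted from epctrl is 4 and GR_EPCTRL_BUFSZ_SCALER is 8,
 * GR_BUFFER_SIZE() evaluates to 4 * 8 = 32 bytes of hardware buffer.
 */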
/* ---------------------------------------------------------------------- */
/* Debug printout functionality */

static const char * const gr_modestring[] = {"control", "iso", "bulk", "int"};

static const char *gr_ep0state_string(enum gr_ep0state state)
{
	static const char *const names[] = {
		[GR_EP0_DISCONNECT] = "disconnect",
		[GR_EP0_SETUP] = "setup",
		[GR_EP0_IDATA] = "idata",
		[GR_EP0_ODATA] = "odata",
		[GR_EP0_ISTATUS] = "istatus",
		[GR_EP0_OSTATUS] = "ostatus",
		[GR_EP0_STALL] = "stall",
		[GR_EP0_SUSPEND] = "suspend",
	};

	if (state < 0 || state >= ARRAY_SIZE(names))
		return "UNKNOWN";
	return names[state];
}
#ifdef VERBOSE_DEBUG

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req)
{
	int buflen = ep->is_in ? req->req.length : req->req.actual;
	int rowlen = 32;
	int plen = min(rowlen, buflen);

	dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen,
		(buflen > plen ? " (truncated)" : ""));
	print_hex_dump_debug(" ", DUMP_PREFIX_NONE,
			     rowlen, 4, req->req.buf, plen, false);
}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length)
{
	dev_vdbg(dev->dev, "REQ: %02x.%02x v%04x i%04x l%04x\n",
		 type, request, value, index, length);
}

#else /* !VERBOSE_DEBUG */

static void gr_dbgprint_request(const char *str, struct gr_ep *ep,
				struct gr_request *req) {}

static void gr_dbgprint_devreq(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index, u16 length) {}

#endif /* VERBOSE_DEBUG */
/* ---------------------------------------------------------------------- */
/* Debugfs functionality */

#ifdef CONFIG_USB_GADGET_DEBUG_FS

static void gr_seq_ep_show(struct seq_file *seq, struct gr_ep *ep)
{
	u32 epctrl = gr_read32(&ep->regs->epctrl);
	u32 epstat = gr_read32(&ep->regs->epstat);
	int mode = (epctrl & GR_EPCTRL_TT_MASK) >> GR_EPCTRL_TT_POS;
	struct gr_request *req;

	seq_printf(seq, "%s:\n", ep->ep.name);
	seq_printf(seq, " mode = %s\n", gr_modestring[mode]);
	seq_printf(seq, " halted: %d\n", !!(epctrl & GR_EPCTRL_EH));
	seq_printf(seq, " disabled: %d\n", !!(epctrl & GR_EPCTRL_ED));
	seq_printf(seq, " valid: %d\n", !!(epctrl & GR_EPCTRL_EV));
	seq_printf(seq, " dma_start = %d\n", ep->dma_start);
	seq_printf(seq, " stopped = %d\n", ep->stopped);
	seq_printf(seq, " wedged = %d\n", ep->wedged);
	seq_printf(seq, " callback = %d\n", ep->callback);
	seq_printf(seq, " maxpacket = %d\n", ep->ep.maxpacket);
	seq_printf(seq, " maxpacket_limit = %d\n", ep->ep.maxpacket_limit);
	seq_printf(seq, " bytes_per_buffer = %d\n", ep->bytes_per_buffer);
	if (mode == 1 || mode == 3)
		seq_printf(seq, " nt = %d\n",
			   (epctrl & GR_EPCTRL_NT_MASK) >> GR_EPCTRL_NT_POS);

	seq_printf(seq, " Buffer 0: %s %s%d\n",
		   epstat & GR_EPSTAT_B0 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? " " : "selected ",
		   (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS);
	seq_printf(seq, " Buffer 1: %s %s%d\n",
		   epstat & GR_EPSTAT_B1 ? "valid" : "invalid",
		   epstat & GR_EPSTAT_BS ? "selected " : " ",
		   (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS);

	if (list_empty(&ep->queue)) {
		seq_puts(seq, " Queue: empty\n\n");
		return;
	}

	seq_puts(seq, " Queue:\n");
	list_for_each_entry(req, &ep->queue, queue) {
		struct gr_dma_desc *desc;
		struct gr_dma_desc *next;

		seq_printf(seq, " 0x%p: 0x%p %d %d\n", req,
			   &req->req.buf, req->req.actual, req->req.length);

		next = req->first_desc;
		do {
			desc = next;
			next = desc->next_desc;
			seq_printf(seq, " %c 0x%p (0x%08x): 0x%05x 0x%08x\n",
				   desc == req->curr_desc ? 'c' : ' ',
				   desc, desc->paddr, desc->ctrl, desc->data);
		} while (desc != req->last_desc);
	}

	seq_puts(seq, "\n");
}

static int gr_dfs_show(struct seq_file *seq, void *v)
{
	struct gr_udc *dev = seq->private;
	u32 control = gr_read32(&dev->regs->control);
	u32 status = gr_read32(&dev->regs->status);
	struct gr_ep *ep;

	seq_printf(seq, "usb state = %s\n",
		   usb_state_string(dev->gadget.state));
	seq_printf(seq, "address = %d\n",
		   (control & GR_CONTROL_UA_MASK) >> GR_CONTROL_UA_POS);
	seq_printf(seq, "speed = %s\n", GR_SPEED_STR(status));
	seq_printf(seq, "ep0state = %s\n", gr_ep0state_string(dev->ep0state));
	seq_printf(seq, "irq_enabled = %d\n", dev->irq_enabled);
	seq_printf(seq, "remote_wakeup = %d\n", dev->remote_wakeup);
	seq_printf(seq, "test_mode = %d\n", dev->test_mode);
	seq_puts(seq, "\n");

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_seq_ep_show(seq, ep);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(gr_dfs);

static void gr_dfs_create(struct gr_udc *dev)
{
	const char *name = "gr_udc_state";

	dev->dfs_root = debugfs_create_dir(dev_name(dev->dev), NULL);
	debugfs_create_file(name, 0444, dev->dfs_root, dev, &gr_dfs_fops);
}

static void gr_dfs_delete(struct gr_udc *dev)
{
	debugfs_remove_recursive(dev->dfs_root);
}

#else /* !CONFIG_USB_GADGET_DEBUG_FS */

static void gr_dfs_create(struct gr_udc *dev) {}
static void gr_dfs_delete(struct gr_udc *dev) {}

#endif /* CONFIG_USB_GADGET_DEBUG_FS */
/* ---------------------------------------------------------------------- */
/* DMA and request handling */

/* Allocates a new struct gr_dma_desc, sets paddr and zeroes the rest */
static struct gr_dma_desc *gr_alloc_dma_desc(struct gr_ep *ep, gfp_t gfp_flags)
{
	dma_addr_t paddr;
	struct gr_dma_desc *dma_desc;

	dma_desc = dma_pool_zalloc(ep->dev->desc_pool, gfp_flags, &paddr);
	if (!dma_desc) {
		dev_err(ep->dev->dev, "Could not allocate from DMA pool\n");
		return NULL;
	}

	dma_desc->paddr = paddr;

	return dma_desc;
}

static inline void gr_free_dma_desc(struct gr_udc *dev,
				    struct gr_dma_desc *desc)
{
	dma_pool_free(dev->desc_pool, desc, (dma_addr_t)desc->paddr);
}

/* Frees the chain of struct gr_dma_desc for the given request */
static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req)
{
	struct gr_dma_desc *desc;
	struct gr_dma_desc *next;

	next = req->first_desc;
	if (!next)
		return;

	do {
		desc = next;
		next = desc->next_desc;
		gr_free_dma_desc(dev, desc);
	} while (desc != req->last_desc);

	req->first_desc = NULL;
	req->curr_desc = NULL;
	req->last_desc = NULL;
}
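
/*
 * Note on the loop above: the do-while walks the chain from first_desc up to
 * and including last_desc, so a chain of a single descriptor is freed
 * correctly as well. next_desc is sampled before each free, so the walk never
 * dereferences freed memory.
 */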
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);

/*
 * Frees allocated resources and calls the appropriate completion function/setup
 * package handler for a finished request.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
			      int status)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	struct gr_udc *dev;

	list_del_init(&req->queue);

	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
	gr_free_dma_desc_chain(dev, req);

	if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
		req->req.actual = req->req.length;
	} else if (req->oddlen && req->req.actual > req->evenlen) {
		/*
		 * Copy to user buffer in this case where length was not evenly
		 * divisible by ep->ep.maxpacket and the last descriptor was
		 * actually used.
		 */
		char *buftail = ((char *)req->req.buf + req->evenlen);

		memcpy(buftail, ep->tailbuf, req->oddlen);

		if (req->req.actual > req->req.length) {
			/* We got more data than was requested */
			dev_dbg(ep->dev->dev, "Overflow for ep %s\n",
				ep->ep.name);
			gr_dbgprint_request("OVFL", ep, req);
			req->req.status = -EOVERFLOW;
		}
	}

	if (!status) {
		if (ep->is_in)
			gr_dbgprint_request("SENT", ep, req);
		else
			gr_dbgprint_request("RECV", ep, req);
	}

	/* Prevent changes to ep->queue during callback */
	ep->callback = 1;
	if (req == dev->ep0reqo && !status) {
		if (req->setup)
			gr_ep0_setup(dev, req);
		else
			dev_err(dev->dev,
				"Unexpected non setup packet on ep0in\n");
	} else if (req->req.complete) {
		spin_unlock(&dev->lock);

		usb_gadget_giveback_request(&ep->ep, &req->req);

		spin_lock(&dev->lock);
	}
	ep->callback = 0;
}
static struct usb_request *gr_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct gr_request *req;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/*
 * Starts DMA for endpoint ep if there are requests in the queue.
 *
 * Must be called with dev->lock held and with !ep->stopped.
 */
static void gr_start_dma(struct gr_ep *ep)
{
	struct gr_request *req;
	u32 dmactrl;

	if (list_empty(&ep->queue)) {
		ep->dma_start = 0;
		return;
	}

	req = list_first_entry(&ep->queue, struct gr_request, queue);

	/* A descriptor should already have been allocated */
	BUG_ON(!req->curr_desc);

	/*
	 * The DMA controller cannot handle smaller OUT buffers than
	 * ep->ep.maxpacket. It could lead to buffer overruns if an
	 * unexpectedly long packet is received. Therefore an internal bounce
	 * buffer gets used when such a request gets enabled.
	 */
	if (!ep->is_in && req->oddlen)
		req->last_desc->data = ep->tailbuf_paddr;

	wmb(); /* Make sure all is settled before handing it over to DMA */

	/* Set the descriptor pointer in the hardware */
	gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr);

	/* Announce available descriptors */
	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_DA);

	ep->dma_start = 1;
}
/*
 * Finishes the first request in the ep's queue and, if available, starts the
 * next request in queue.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static void gr_dma_advance(struct gr_ep *ep, int status)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	gr_finish_request(ep, req, status);
	gr_start_dma(ep); /* Regardless of ep->dma_start */
}

/*
 * Abort DMA for an endpoint. Sets the abort DMA bit which causes an ongoing DMA
 * transfer to be canceled and clears GR_DMACTRL_DA.
 *
 * Must be called with dev->lock held.
 */
static void gr_abort_dma(struct gr_ep *ep)
{
	u32 dmactrl;

	dmactrl = gr_read32(&ep->regs->dmactrl);
	gr_write32(&ep->regs->dmactrl, dmactrl | GR_DMACTRL_AD);
}
/*
 * Allocates and sets up a struct gr_dma_desc and puts it on the descriptor
 * chain.
 *
 * Size is not used for OUT endpoints. Hardware can not be instructed to handle
 * a smaller buffer than MAXPL in the OUT direction.
 */
static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req,
			   dma_addr_t data, unsigned size, gfp_t gfp_flags)
{
	struct gr_dma_desc *desc;

	desc = gr_alloc_dma_desc(ep, gfp_flags);
	if (!desc)
		return -ENOMEM;

	desc->data = data;
	if (ep->is_in)
		desc->ctrl =
			(GR_DESC_IN_CTRL_LEN_MASK & size) | GR_DESC_IN_CTRL_EN;
	else
		desc->ctrl = GR_DESC_OUT_CTRL_IE;

	if (!req->first_desc) {
		req->first_desc = desc;
		req->curr_desc = desc;
	} else {
		req->last_desc->next_desc = desc;
		req->last_desc->next = desc->paddr;
		req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX;
	}
	req->last_desc = desc;

	return 0;
}
/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the OUT direction.
 *
 * The first descriptor in the chain is enabled, the rest disabled. The
 * interrupt handler will later enable them one by one when needed so we can
 * find out when the transfer is finished. For OUT endpoints, all descriptors
 * therefore generate interrupts.
 */
static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req,
				  gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left to provide descriptors for */
	u16 bytes_used; /* Bytes accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		if (size < ep->bytes_per_buffer) {
			/* Prepare using bounce buffer */
			req->evenlen = req->req.length - bytes_left;
			req->oddlen = size;
		}

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	}

	req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
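
/*
 * Illustration of the resulting OUT chain for a 3000 byte request with
 * bytes_per_buffer == 1024 (numbers chosen for illustration only):
 *
 *   desc0: 1024 bytes, EN set - enabled up front
 *   desc1: 1024 bytes         - enabled later by the interrupt handler
 *   desc2:  952 bytes         - oddlen = 952, evenlen = 2048; its data
 *                               pointer is redirected to the bounce buffer
 *                               by gr_start_dma()
 */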
/*
 * Sets up a chain of struct gr_dma_desc descriptors pointing to buffers that
 * together cover req->req.length bytes of the buffer at DMA address
 * req->req.dma for the IN direction.
 *
 * When more data is provided than the maximum payload size, the hardware splits
 * this up into several payloads automatically. Moreover, ep->bytes_per_buffer
 * is always set to a multiple of the maximum payload (restricted to the valid
 * number of maximum payloads during high bandwidth isochronous or interrupt
 * transfers).
 *
 * All descriptors are enabled from the beginning and we only generate an
 * interrupt for the last one indicating that the entire request has been pushed
 * to hardware.
 */
static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req,
				 gfp_t gfp_flags)
{
	u16 bytes_left; /* Bytes left in req to provide descriptors for */
	u16 bytes_used; /* Bytes in req accommodated for */
	int ret = 0;

	req->first_desc = NULL; /* Signals that no allocation is done yet */
	bytes_left = req->req.length;
	bytes_used = 0;
	do { /* Allow for zero length packets */
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min(bytes_left, ep->bytes_per_buffer);

		ret = gr_add_dma_desc(ep, req, start, size, gfp_flags);
		if (ret)
			goto alloc_err;

		bytes_left -= size;
		bytes_used += size;
	} while (bytes_left > 0);

	/*
	 * Send an extra zero length packet to indicate that no more data is
	 * available when req->req.zero is set and the data length is an even
	 * multiple of ep->ep.maxpacket.
	 */
	if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) {
		ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags);
		if (ret)
			goto alloc_err;
	}

	/*
	 * For IN packets we only want to know when the last packet has been
	 * transmitted (not just put into internal buffers).
	 */
	req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI;

	return 0;

alloc_err:
	gr_free_dma_desc_chain(ep->dev, req);

	return ret;
}
/* Must be called with dev->lock held */
static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags)
{
	struct gr_udc *dev = ep->dev;
	int ret;

	if (unlikely(!ep->ep.desc && ep->num != 0)) {
		dev_err(dev->dev, "No ep descriptor for %s\n", ep->ep.name);
		return -EINVAL;
	}

	if (unlikely(!req->req.buf || !list_empty(&req->queue))) {
		dev_err(dev->dev,
			"Invalid request for %s: buf=%p list_empty=%d\n",
			ep->ep.name, req->req.buf, list_empty(&req->queue));
		return -EINVAL;
	}

	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
		dev_err(dev->dev, "-ESHUTDOWN");
		return -ESHUTDOWN;
	}

	/* Can't touch registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND) {
		dev_err(dev->dev, "-EBUSY");
		return -EBUSY;
	}

	/* Set up DMA mapping in case the caller didn't */
	ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in);
	if (ret) {
		dev_err(dev->dev, "usb_gadget_map_request");
		return ret;
	}

	if (ep->is_in)
		ret = gr_setup_in_desc_list(ep, req, gfp_flags);
	else
		ret = gr_setup_out_desc_list(ep, req, gfp_flags);
	if (ret)
		return ret;

	req->req.status = -EINPROGRESS;
	req->req.actual = 0;
	list_add_tail(&req->queue, &ep->queue);

	/* Start DMA if not started, otherwise interrupt handler handles it */
	if (!ep->dma_start && likely(!ep->stopped))
		gr_start_dma(ep);

	return 0;
}

/*
 * Queue a request from within the driver.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req,
			       gfp_t gfp_flags)
{
	if (ep->is_in)
		gr_dbgprint_request("RESP", ep, req);

	return gr_queue(ep, req, gfp_flags);
}
/* ---------------------------------------------------------------------- */
/* General helper functions */

/*
 * Dequeue ALL requests.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_ep_nuke(struct gr_ep *ep)
{
	struct gr_request *req;

	ep->stopped = 1;
	ep->dma_start = 0;
	gr_abort_dma(ep);

	while (!list_empty(&ep->queue)) {
		req = list_first_entry(&ep->queue, struct gr_request, queue);
		gr_finish_request(ep, req, -ESHUTDOWN);
	}
}

/*
 * Reset the hardware state of this endpoint.
 *
 * Must be called with dev->lock held.
 */
static void gr_ep_reset(struct gr_ep *ep)
{
	gr_write32(&ep->regs->epctrl, 0);
	gr_write32(&ep->regs->dmactrl, 0);

	ep->ep.maxpacket = MAX_CTRL_PL_SIZE;
	ep->ep.desc = NULL;
	ep->stopped = 1;
	ep->dma_start = 0;
}
/*
 * Generate STALL on ep0in/out.
 *
 * Must be called with dev->lock held.
 */
static void gr_control_stall(struct gr_udc *dev)
{
	u32 epctrl;

	epctrl = gr_read32(&dev->epo[0].regs->epctrl);
	gr_write32(&dev->epo[0].regs->epctrl, epctrl | GR_EPCTRL_CS);
	epctrl = gr_read32(&dev->epi[0].regs->epctrl);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl | GR_EPCTRL_CS);

	dev->ep0state = GR_EP0_STALL;
}

/*
 * Halts, halts and wedges, or clears halt for an endpoint.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep_halt_wedge(struct gr_ep *ep, int halt, int wedge, int fromhost)
{
	u32 epctrl;
	int retval = 0;

	if (ep->num && !ep->ep.desc)
		return -EINVAL;

	if (ep->num && ep->ep.desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)
		return -EOPNOTSUPP;

	/* Never actually halt ep0, and therefore never clear halt for ep0 */
	if (!ep->num) {
		if (halt && !fromhost) {
			/* ep0 halt from gadget - generate protocol stall */
			gr_control_stall(ep->dev);
			dev_dbg(ep->dev->dev, "EP: stall ep0\n");
			return 0;
		}
		return -EINVAL;
	}

	dev_dbg(ep->dev->dev, "EP: %s halt %s\n",
		(halt ? (wedge ? "wedge" : "set") : "clear"), ep->ep.name);

	epctrl = gr_read32(&ep->regs->epctrl);
	if (halt) {
		/* Set HALT */
		gr_write32(&ep->regs->epctrl, epctrl | GR_EPCTRL_EH);
		ep->stopped = 1;
		if (wedge)
			ep->wedged = 1;
	} else {
		gr_write32(&ep->regs->epctrl, epctrl & ~GR_EPCTRL_EH);
		ep->stopped = 0;
		ep->wedged = 0;

		/* Things might have been queued up in the meantime */
		if (!ep->dma_start)
			gr_start_dma(ep);
	}

	return retval;
}

/* Must be called with dev->lock held */
static inline void gr_set_ep0state(struct gr_udc *dev, enum gr_ep0state value)
{
	if (dev->ep0state != value)
		dev_vdbg(dev->dev, "STATE: ep0state=%s\n",
			 gr_ep0state_string(value));
	dev->ep0state = value;
}
/*
 * Should only be called when endpoints can not generate interrupts.
 *
 * Must be called with dev->lock held.
 */
static void gr_disable_interrupts_and_pullup(struct gr_udc *dev)
{
	gr_write32(&dev->regs->control, 0);
	wmb(); /* Make sure that we do not deny one of our interrupts */
	dev->irq_enabled = 0;
}

/*
 * Stop all device activity and disable data line pullup.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static void gr_stop_activity(struct gr_udc *dev)
{
	struct gr_ep *ep;

	list_for_each_entry(ep, &dev->ep_list, ep_list)
		gr_ep_nuke(ep);

	gr_disable_interrupts_and_pullup(dev);

	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
	usb_gadget_set_state(&dev->gadget, USB_STATE_NOTATTACHED);
}
/* ---------------------------------------------------------------------- */
/* ep0 setup packet handling */

static void gr_ep0_testmode_complete(struct usb_ep *_ep,
				     struct usb_request *_req)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	u32 control;

	ep = container_of(_ep, struct gr_ep, ep);
	dev = ep->dev;

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
	control |= GR_CONTROL_TM | (dev->test_mode << GR_CONTROL_TS_POS);
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);
}

static void gr_ep0_dummy_complete(struct usb_ep *_ep, struct usb_request *_req)
{
	/* Nothing needs to be done here */
}

/*
 * Queue a response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static int gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length,
			  void (*complete)(struct usb_ep *ep,
					   struct usb_request *req))
{
	u8 *reqbuf = dev->ep0reqi->req.buf;
	int status;
	int i;

	for (i = 0; i < length; i++)
		reqbuf[i] = buf[i];
	dev->ep0reqi->req.length = length;
	dev->ep0reqi->req.complete = complete;

	status = gr_queue_int(&dev->epi[0], dev->ep0reqi, GFP_ATOMIC);
	if (status < 0)
		dev_err(dev->dev,
			"Could not queue ep0in setup response: %d\n", status);

	return status;
}

/*
 * Queue a 2 byte response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_u16(struct gr_udc *dev, u16 response)
{
	__le16 le_response = cpu_to_le16(response);

	return gr_ep0_respond(dev, (u8 *)&le_response, 2,
			      gr_ep0_dummy_complete);
}

/*
 * Queue a ZLP response on ep0in.
 *
 * Must be called with dev->lock held.
 */
static inline int gr_ep0_respond_empty(struct gr_udc *dev)
{
	return gr_ep0_respond(dev, NULL, 0, gr_ep0_dummy_complete);
}

/*
 * This is run when a SET_ADDRESS request is received. First writes the new
 * address to the control register which is updated internally when the next
 * IN packet is ACKed.
 *
 * Must be called with dev->lock held.
 */
static void gr_set_address(struct gr_udc *dev, u8 address)
{
	u32 control;

	control = gr_read32(&dev->regs->control) & ~GR_CONTROL_UA_MASK;
	control |= (address << GR_CONTROL_UA_POS) & GR_CONTROL_UA_MASK;
	control |= GR_CONTROL_SU;
	gr_write32(&dev->regs->control, control);
}
/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_device_request(struct gr_udc *dev, u8 type, u8 request,
			     u16 value, u16 index)
{
	u16 response;
	u8 test;

	switch (request) {
	case USB_REQ_SET_ADDRESS:
		dev_dbg(dev->dev, "STATUS: address %d\n", value & 0xff);
		gr_set_address(dev, value & 0xff);
		if (value)
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		else
			usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
		return gr_ep0_respond_empty(dev);

	case USB_REQ_GET_STATUS:
		/* Self powered | remote wakeup */
		response = 0x0001 | (dev->remote_wakeup ? 0x0002 : 0);
		return gr_ep0_respond_u16(dev, response);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Allow remote wakeup */
			dev->remote_wakeup = 1;
			return gr_ep0_respond_empty(dev);

		case USB_DEVICE_TEST_MODE:
			/* The hardware does not support TEST_FORCE_EN */
			test = index >> 8;
			if (test >= TEST_J && test <= TEST_PACKET) {
				dev->test_mode = test;
				return gr_ep0_respond(dev, NULL, 0,
						      gr_ep0_testmode_complete);
			}
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_DEVICE_REMOTE_WAKEUP:
			/* Disallow remote wakeup */
			dev->remote_wakeup = 0;
			return gr_ep0_respond_empty(dev);
		}
		break;
	}

	return 1; /* Delegate the rest */
}
/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_interface_request(struct gr_udc *dev, u8 type, u8 request,
				u16 value, u16 index)
{
	if (dev->gadget.state != USB_STATE_CONFIGURED)
		return -1;

	/*
	 * Should return STALL for invalid interfaces, but udc driver does not
	 * know anything about that. However, many gadget drivers do not handle
	 * GET_STATUS so we need to take care of that.
	 */
	switch (request) {
	case USB_REQ_GET_STATUS:
		return gr_ep0_respond_u16(dev, 0x0000);

	case USB_REQ_SET_FEATURE:
	case USB_REQ_CLEAR_FEATURE:
		/*
		 * No possible valid standard requests. Still let gadget drivers
		 * have a go at it.
		 */
		break;
	}

	return 1; /* Delegate the rest */
}
/*
 * Returns negative for STALL, 0 for successful handling and positive for
 * delegation.
 *
 * Must be called with dev->lock held.
 */
static int gr_endpoint_request(struct gr_udc *dev, u8 type, u8 request,
			       u16 value, u16 index)
{
	struct gr_ep *ep;
	int status;
	int halted;
	u8 epnum = index & USB_ENDPOINT_NUMBER_MASK;
	u8 is_in = index & USB_ENDPOINT_DIR_MASK;

	if ((is_in && epnum >= dev->nepi) || (!is_in && epnum >= dev->nepo))
		return -1;

	if (dev->gadget.state != USB_STATE_CONFIGURED && epnum != 0)
		return -1;

	ep = (is_in ? &dev->epi[epnum] : &dev->epo[epnum]);

	switch (request) {
	case USB_REQ_GET_STATUS:
		halted = gr_read32(&ep->regs->epctrl) & GR_EPCTRL_EH;
		return gr_ep0_respond_u16(dev, halted ? 0x0001 : 0);

	case USB_REQ_SET_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			status = gr_ep_halt_wedge(ep, 1, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (value) {
		case USB_ENDPOINT_HALT:
			if (ep->wedged)
				return -1;
			status = gr_ep_halt_wedge(ep, 0, 0, 1);
			if (status >= 0)
				status = gr_ep0_respond_empty(dev);
			return status;
		}
		break;
	}

	return 1; /* Delegate the rest */
}
/* Must be called with dev->lock held */
static void gr_ep0out_requeue(struct gr_udc *dev)
{
	int ret = gr_queue_int(&dev->epo[0], dev->ep0reqo, GFP_ATOMIC);

	if (ret)
		dev_err(dev->dev, "Could not queue ep0out setup request: %d\n",
			ret);
}
/*
 * The main function dealing with setup requests on ep0.
 *
 * Must be called with dev->lock held and irqs disabled
 */
static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
	__releases(&dev->lock)
	__acquires(&dev->lock)
{
	union {
		struct usb_ctrlrequest ctrl;
		u8 raw[8];
		u32 word[2];
	} u;
	u8 type;
	u8 request;
	u16 value;
	u16 index;
	u16 length;
	int i;
	int status;

	/* Restore from ep0 halt */
	if (dev->ep0state == GR_EP0_STALL) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (!req->req.actual)
			goto out;
	}

	if (dev->ep0state == GR_EP0_ISTATUS) {
		gr_set_ep0state(dev, GR_EP0_SETUP);
		if (req->req.actual > 0)
			dev_dbg(dev->dev,
				"Unexpected setup packet at state %s\n",
				gr_ep0state_string(GR_EP0_ISTATUS));
		else
			goto out; /* Got expected ZLP */
	} else if (dev->ep0state != GR_EP0_SETUP) {
		dev_info(dev->dev,
			 "Unexpected ep0out request at state %s - stalling\n",
			 gr_ep0state_string(dev->ep0state));
		gr_control_stall(dev);
		gr_set_ep0state(dev, GR_EP0_SETUP);
		goto out;
	} else if (!req->req.actual) {
		dev_dbg(dev->dev, "Unexpected ZLP at state %s\n",
			gr_ep0state_string(dev->ep0state));
		goto out;
	}

	/* Handle SETUP packet */
	for (i = 0; i < req->req.actual; i++)
		u.raw[i] = ((u8 *)req->req.buf)[i];

	type = u.ctrl.bRequestType;
	request = u.ctrl.bRequest;
	value = le16_to_cpu(u.ctrl.wValue);
	index = le16_to_cpu(u.ctrl.wIndex);
	length = le16_to_cpu(u.ctrl.wLength);

	gr_dbgprint_devreq(dev, type, request, value, index, length);

	/* Check for data stage */
	if (length) {
		if (type & USB_DIR_IN)
			gr_set_ep0state(dev, GR_EP0_IDATA);
		else
			gr_set_ep0state(dev, GR_EP0_ODATA);
	}

	status = 1; /* Positive status flags delegation */
	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
		switch (type & USB_RECIP_MASK) {
		case USB_RECIP_DEVICE:
			status = gr_device_request(dev, type, request,
						   value, index);
			break;
		case USB_RECIP_ENDPOINT:
			status = gr_endpoint_request(dev, type, request,
						     value, index);
			break;
		case USB_RECIP_INTERFACE:
			status = gr_interface_request(dev, type, request,
						      value, index);
			break;
		}
	}

	if (status > 0) {
		spin_unlock(&dev->lock);

		dev_vdbg(dev->dev, "DELEGATE\n");
		status = dev->driver->setup(&dev->gadget, &u.ctrl);

		spin_lock(&dev->lock);
	}

	/* Generate STALL on both ep0out and ep0in if requested */
	if (unlikely(status < 0)) {
		dev_vdbg(dev->dev, "STALL\n");
		gr_control_stall(dev);
	}

	if ((type & USB_TYPE_MASK) == USB_TYPE_STANDARD &&
	    request == USB_REQ_SET_CONFIGURATION) {
		if (!value) {
			dev_dbg(dev->dev, "STATUS: deconfigured\n");
			usb_gadget_set_state(&dev->gadget, USB_STATE_ADDRESS);
		} else if (status >= 0) {
			/* Not configured unless gadget OKs it */
			dev_dbg(dev->dev, "STATUS: configured: %d\n", value);
			usb_gadget_set_state(&dev->gadget,
					     USB_STATE_CONFIGURED);
		}
	}

	/* Get ready for next stage */
	if (dev->ep0state == GR_EP0_ODATA)
		gr_set_ep0state(dev, GR_EP0_OSTATUS);
	else if (dev->ep0state == GR_EP0_IDATA)
		gr_set_ep0state(dev, GR_EP0_ISTATUS);
	else
		gr_set_ep0state(dev, GR_EP0_SETUP);

out:
	gr_ep0out_requeue(dev);
}
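
/*
 * Summary of the ep0 state transitions driven above (derived from the code,
 * not from the hardware manual):
 *
 *   control read:  SETUP -> IDATA -> ISTATUS -> SETUP
 *   control write: SETUP -> ODATA -> OSTATUS -> SETUP
 *   no-data:       SETUP -> SETUP
 *
 * The ISTATUS -> SETUP step happens when the expected status stage ZLP
 * arrives on ep0out; the OSTATUS case is completed in gr_handle_out_ep().
 */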
/* ---------------------------------------------------------------------- */
/* VBUS and USB reset handling */

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_connected(struct gr_udc *dev, u32 status)
{
	u32 control;

	dev->gadget.speed = GR_SPEED(status);
	usb_gadget_set_state(&dev->gadget, USB_STATE_POWERED);

	/* Turn on full interrupts and pullup */
	control = (GR_CONTROL_SI | GR_CONTROL_UI | GR_CONTROL_VI |
		   GR_CONTROL_SP | GR_CONTROL_EP);
	gr_write32(&dev->regs->control, control);
}

/* Must be called with dev->lock held */
static void gr_enable_vbus_detect(struct gr_udc *dev)
{
	u32 status;

	dev->irq_enabled = 1;
	wmb(); /* Make sure we do not ignore an interrupt */
	gr_write32(&dev->regs->control, GR_CONTROL_VI);

	/* Take care of the case we are already plugged in at this point */
	status = gr_read32(&dev->regs->status);
	if (status & GR_STATUS_VB)
		gr_vbus_connected(dev, status);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_vbus_disconnected(struct gr_udc *dev)
{
	gr_stop_activity(dev);

	/* Report disconnect */
	if (dev->driver && dev->driver->disconnect) {
		spin_unlock(&dev->lock);

		dev->driver->disconnect(&dev->gadget);

		spin_lock(&dev->lock);
	}

	gr_enable_vbus_detect(dev);
}

/* Must be called with dev->lock held and irqs disabled */
static void gr_udc_usbreset(struct gr_udc *dev, u32 status)
{
	gr_set_address(dev, 0);
	gr_set_ep0state(dev, GR_EP0_SETUP);
	usb_gadget_set_state(&dev->gadget, USB_STATE_DEFAULT);
	dev->gadget.speed = GR_SPEED(status);

	gr_ep_nuke(&dev->epo[0]);
	gr_ep_nuke(&dev->epi[0]);
	dev->epo[0].stopped = 0;
	dev->epi[0].stopped = 0;
	gr_ep0out_requeue(dev);
}
/* ---------------------------------------------------------------------- */
/* Irq handling */

/*
 * Handles interrupts from in endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_in_ep(struct gr_ep *ep)
{
	struct gr_request *req;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->last_desc)
		return 0;

	if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN)
		return 0; /* Not put in hardware buffers yet */

	if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0))
		return 0; /* Not transmitted yet, still in hardware buffers */

	/* Write complete */
	gr_dma_advance(ep, 0);

	return 1;
}
/*
 * Handles interrupts from out endpoints. Returns whether something was handled.
 *
 * Must be called with dev->lock held, irqs disabled and with !ep->stopped.
 */
static int gr_handle_out_ep(struct gr_ep *ep)
{
	u32 ep_dmactrl;
	u32 ctrl;
	u16 len;
	struct gr_request *req;
	struct gr_udc *dev = ep->dev;

	req = list_first_entry(&ep->queue, struct gr_request, queue);
	if (!req->curr_desc)
		return 0;

	ctrl = READ_ONCE(req->curr_desc->ctrl);
	if (ctrl & GR_DESC_OUT_CTRL_EN)
		return 0; /* Not received yet */

	/* Read complete */
	len = ctrl & GR_DESC_OUT_CTRL_LEN_MASK;
	req->req.actual += len;
	if (ctrl & GR_DESC_OUT_CTRL_SE)
		req->setup = 1;

	if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) {
		/* Short packet or >= expected size - we are done */

		if ((ep == &dev->epo[0]) && (dev->ep0state == GR_EP0_OSTATUS)) {
			/*
			 * Send a status stage ZLP to ack the DATA stage in the
			 * OUT direction. This needs to be done before
			 * gr_dma_advance as that can lead to a call to
			 * ep0_setup that can change dev->ep0state.
			 */
			gr_ep0_respond_empty(dev);
			gr_set_ep0state(dev, GR_EP0_SETUP);
		}

		gr_dma_advance(ep, 0);
	} else {
		/* Not done yet. Enable the next descriptor to receive more. */
		req->curr_desc = req->curr_desc->next_desc;
		req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN;

		ep_dmactrl = gr_read32(&ep->regs->dmactrl);
		gr_write32(&ep->regs->dmactrl, ep_dmactrl | GR_DMACTRL_DA);
	}

	return 1;
}
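
/*
 * Note on OUT completion: req->req.actual grows by one descriptor's byte
 * count per interrupt, and the request is considered done either on a short
 * packet (len < maxpacket) or once the expected length has been received.
 * This is why gr_finish_request() only fixes up req.actual for IN requests.
 */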
/*
 * Handle state changes. Returns whether something was handled.
 *
 * Must be called with dev->lock held and irqs disabled.
 */
static int gr_handle_state_changes(struct gr_udc *dev)
{
	u32 status = gr_read32(&dev->regs->status);
	int handled = 0;
	int powstate = !(dev->gadget.state == USB_STATE_NOTATTACHED ||
			 dev->gadget.state == USB_STATE_ATTACHED);

	/* VBUS valid detected */
	if (!powstate && (status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus valid detected\n");
		gr_vbus_connected(dev, status);
		handled = 1;
	}

	/* Disconnect */
	if (powstate && !(status & GR_STATUS_VB)) {
		dev_dbg(dev->dev, "STATUS: vbus invalid detected\n");
		gr_vbus_disconnected(dev);
		handled = 1;
	}

	/* USB reset detected */
	if (status & GR_STATUS_UR) {
		dev_dbg(dev->dev, "STATUS: USB reset - speed is %s\n",
			GR_SPEED_STR(status));
		gr_write32(&dev->regs->status, GR_STATUS_UR);
		gr_udc_usbreset(dev, status);
		handled = 1;
	}

	/* Speed change */
	if (dev->gadget.speed != GR_SPEED(status)) {
		dev_dbg(dev->dev, "STATUS: USB Speed change to %s\n",
			GR_SPEED_STR(status));
		dev->gadget.speed = GR_SPEED(status);
		handled = 1;
	}

	/* Going into suspend */
	if ((dev->ep0state != GR_EP0_SUSPEND) && !(status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB suspend\n");
		gr_set_ep0state(dev, GR_EP0_SUSPEND);
		dev->suspended_from = dev->gadget.state;
		usb_gadget_set_state(&dev->gadget, USB_STATE_SUSPENDED);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->suspend) {
			spin_unlock(&dev->lock);

			dev->driver->suspend(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	/* Coming out of suspend */
	if ((dev->ep0state == GR_EP0_SUSPEND) && (status & GR_STATUS_SU)) {
		dev_dbg(dev->dev, "STATUS: USB resume\n");
		if (dev->suspended_from == USB_STATE_POWERED)
			gr_set_ep0state(dev, GR_EP0_DISCONNECT);
		else
			gr_set_ep0state(dev, GR_EP0_SETUP);
		usb_gadget_set_state(&dev->gadget, dev->suspended_from);

		if ((dev->gadget.speed != USB_SPEED_UNKNOWN) &&
		    dev->driver && dev->driver->resume) {
			spin_unlock(&dev->lock);

			dev->driver->resume(&dev->gadget);

			spin_lock(&dev->lock);
		}
		handled = 1;
	}

	return handled;
}
/* Non-interrupt context irq handler (the threaded half of the irq) */
static irqreturn_t gr_irq_handler(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;
	struct gr_ep *ep;
	int handled = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	if (!dev->irq_enabled)
		goto out;

	/*
	 * Check IN ep interrupts. We check these before the OUT eps because
	 * some gadget drivers reuse a request that might still be outstanding
	 * and needs to be completed first (mainly setup requests).
	 */
	for (i = 0; i < dev->nepi; i++) {
		ep = &dev->epi[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_in_ep(ep) || handled;
	}

	/* Check OUT ep interrupts */
	for (i = 0; i < dev->nepo; i++) {
		ep = &dev->epo[i];
		if (!ep->stopped && !ep->callback && !list_empty(&ep->queue))
			handled = gr_handle_out_ep(ep) || handled;
	}

	/* Check status interrupts */
	handled = gr_handle_state_changes(dev) || handled;

	/*
	 * Check AMBA DMA errors. Only check if we didn't find anything else to
	 * handle because this shouldn't happen if we did everything right.
	 */
	if (!handled) {
		list_for_each_entry(ep, &dev->ep_list, ep_list) {
			if (gr_read32(&ep->regs->dmactrl) & GR_DMACTRL_AE) {
				dev_err(dev->dev,
					"AMBA Error occurred for %s\n",
					ep->ep.name);
				handled = 1;
			}
		}
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return handled ? IRQ_HANDLED : IRQ_NONE;
}
/* Interrupt context irq handler: only decides whether to wake the thread */
static irqreturn_t gr_irq(int irq, void *_dev)
{
	struct gr_udc *dev = _dev;

	if (!dev->irq_enabled)
		return IRQ_NONE;

	return IRQ_WAKE_THREAD;
}
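
/*
 * The two handlers above form a threaded irq pair: gr_irq() runs in hard irq
 * context and returns IRQ_WAKE_THREAD, while gr_irq_handler() does the actual
 * work in process context under dev->lock. They are presumably registered
 * together via request_threaded_irq() in the probe code, which lies outside
 * this excerpt.
 */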
  1214. /* ---------------------------------------------------------------------- */
  1215. /* USB ep ops */
  1216. /* Enable endpoint. Not for ep0in and ep0out that are handled separately. */
  1217. static int gr_ep_enable(struct usb_ep *_ep,
  1218. const struct usb_endpoint_descriptor *desc)
  1219. {
  1220. struct gr_udc *dev;
  1221. struct gr_ep *ep;
  1222. u8 mode;
  1223. u8 nt;
  1224. u16 max;
  1225. u16 buffer_size = 0;
  1226. u32 epctrl;
  1227. ep = container_of(_ep, struct gr_ep, ep);
  1228. if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT)
  1229. return -EINVAL;
  1230. dev = ep->dev;
  1231. /* 'ep0' IN and OUT are reserved */
  1232. if (ep == &dev->epo[0] || ep == &dev->epi[0])
  1233. return -EINVAL;
  1234. if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
  1235. return -ESHUTDOWN;
  1236. /* Make sure we are clear for enabling */
  1237. epctrl = gr_read32(&ep->regs->epctrl);
  1238. if (epctrl & GR_EPCTRL_EV)
  1239. return -EBUSY;
  1240. /* Check that directions match */
  1241. if (!ep->is_in != !usb_endpoint_dir_in(desc))
  1242. return -EINVAL;
  1243. /* Check ep num */
  1244. if ((!ep->is_in && ep->num >= dev->nepo) ||
  1245. (ep->is_in && ep->num >= dev->nepi))
  1246. return -EINVAL;
  1247. if (usb_endpoint_xfer_control(desc)) {
  1248. mode = 0;
  1249. } else if (usb_endpoint_xfer_isoc(desc)) {
  1250. mode = 1;
  1251. } else if (usb_endpoint_xfer_bulk(desc)) {
  1252. mode = 2;
  1253. } else if (usb_endpoint_xfer_int(desc)) {
  1254. mode = 3;
  1255. } else {
  1256. dev_err(dev->dev, "Unknown transfer type for %s\n",
  1257. ep->ep.name);
  1258. return -EINVAL;
  1259. }
  1260. /*
  1261. * Bits 10-0 set the max payload. 12-11 set the number of
  1262. * additional transactions.
  1263. */
  1264. max = usb_endpoint_maxp(desc);
  1265. nt = usb_endpoint_maxp_mult(desc) - 1;
  1266. buffer_size = GR_BUFFER_SIZE(epctrl);
  1267. if (nt && (mode == 0 || mode == 2)) {
  1268. dev_err(dev->dev,
  1269. "%s mode: multiple trans./microframe not valid\n",
  1270. (mode == 2 ? "Bulk" : "Control"));
  1271. return -EINVAL;
  1272. } else if (nt == 0x3) {
  1273. dev_err(dev->dev,
  1274. "Invalid value 0x3 for additional trans./microframe\n");
  1275. return -EINVAL;
  1276. } else if ((nt + 1) * max > buffer_size) {
  1277. dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
  1278. buffer_size, (nt + 1), max);
  1279. return -EINVAL;
  1280. } else if (max == 0) {
  1281. dev_err(dev->dev, "Max payload cannot be set to 0\n");
  1282. return -EINVAL;
  1283. } else if (max > ep->ep.maxpacket_limit) {
  1284. dev_err(dev->dev, "Requested max payload %d > limit %d\n",
  1285. max, ep->ep.maxpacket_limit);
  1286. return -EINVAL;
  1287. }
  1288. spin_lock(&ep->dev->lock);
  1289. if (!ep->stopped) {
  1290. spin_unlock(&ep->dev->lock);
  1291. return -EBUSY;
  1292. }
  1293. ep->stopped = 0;
  1294. ep->wedged = 0;
  1295. ep->ep.desc = desc;
  1296. ep->ep.maxpacket = max;
  1297. ep->dma_start = 0;
  1298. if (nt) {
  1299. /*
  1300. * Maximum possible size of all payloads in one microframe
  1301. * regardless of direction when using high-bandwidth mode.
  1302. */
  1303. ep->bytes_per_buffer = (nt + 1) * max;
  1304. } else if (ep->is_in) {
  1305. /*
  1306. * The biggest multiple of maximum packet size that fits into
  1307. * the buffer. The hardware will split up into many packets in
  1308. * the IN direction.
  1309. */
  1310. ep->bytes_per_buffer = (buffer_size / max) * max;
  1311. } else {
  1312. /*
  1313. * Only single packets will be placed the buffers in the OUT
  1314. * direction.
  1315. */
  1316. ep->bytes_per_buffer = max;
  1317. }

	epctrl = (max << GR_EPCTRL_MAXPL_POS)
		| (nt << GR_EPCTRL_NT_POS)
		| (mode << GR_EPCTRL_TT_POS)
		| GR_EPCTRL_EV;
	if (ep->is_in)
		epctrl |= GR_EPCTRL_PI;
	gr_write32(&ep->regs->epctrl, epctrl);

	gr_write32(&ep->regs->dmactrl, GR_DMACTRL_IE | GR_DMACTRL_AI);

	spin_unlock(&ep->dev->lock);

	dev_dbg(ep->dev->dev, "EP: %s enabled - %s with %d bytes/buffer\n",
		ep->ep.name, gr_modestring[mode], ep->bytes_per_buffer);
	return 0;
}
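
/*
 * Gadget-side usage sketch (illustrative, not part of this driver): a
 * function driver typically selects a descriptor for the current speed and
 * then enables the endpoint, which ends up here via gr_ep_ops.enable:
 *
 *	ret = config_ep_by_speed(gadget, f, ep);
 *	if (!ret)
 *		ret = usb_ep_enable(ep);
 */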

/* Disable endpoint. Not for ep0in and ep0out, which are handled separately. */
static int gr_ep_disable(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	struct gr_udc *dev;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !ep->ep.desc)
		return -ENODEV;

	dev = ep->dev;

	/* 'ep0' IN and OUT are reserved */
	if (ep == &dev->epo[0] || ep == &dev->epi[0])
		return -EINVAL;

	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	dev_dbg(ep->dev->dev, "EP: disable %s\n", ep->ep.name);

	spin_lock_irqsave(&dev->lock, flags);

	gr_ep_nuke(ep);
	gr_ep_reset(ep);
	ep->ep.desc = NULL;

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

/*
 * Frees a request, but not any DMA buffers associated with it
 * (gr_finish_request should already have taken care of that).
 */
static void gr_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;

	if (!_ep || !_req)
		return;
	req = container_of(_req, struct gr_request, req);

	/* Leads to memory leak */
	WARN(!list_empty(&req->queue),
	     "request not dequeued properly before freeing\n");

	kfree(req);
}

/* Queue a request from the gadget */
static int gr_queue_ext(struct usb_ep *_ep, struct usb_request *_req,
			gfp_t gfp_flags)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct gr_udc *dev;
	int ret;

	if (unlikely(!_ep || !_req))
		return -EINVAL;

	ep = container_of(_ep, struct gr_ep, ep);
	req = container_of(_req, struct gr_request, req);
	dev = ep->dev;

	spin_lock(&ep->dev->lock);

	/*
	 * The ep0 pointer in the gadget struct is used both for ep0in and
	 * ep0out. In a data stage in the out direction ep0out needs to be used
	 * instead of the default ep0in. Completion functions might use
	 * driver_data, so that needs to be copied as well.
	 */
	if ((ep == &dev->epi[0]) && (dev->ep0state == GR_EP0_ODATA)) {
		ep = &dev->epo[0];
		ep->ep.driver_data = dev->epi[0].ep.driver_data;
	}

	if (ep->is_in)
		gr_dbgprint_request("EXTERN", ep, req);

	ret = gr_queue(ep, req, GFP_ATOMIC);

	spin_unlock(&ep->dev->lock);

	return ret;
}
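
/*
 * Illustrative caller pattern for the queue op above (buffer, length and
 * completion names are hypothetical):
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	// called back via gr_finish_request
 *	usb_ep_queue(ep, req, GFP_ATOMIC);
 */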

/* Dequeue JUST ONE request */
static int gr_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct gr_request *req;
	struct gr_ep *ep;
	struct gr_udc *dev;
	int ret = 0;
	unsigned long flags;

	ep = container_of(_ep, struct gr_ep, ep);
	if (!_ep || !_req || (!ep->ep.desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* We can't touch (DMA) registers when suspended */
	if (dev->ep0state == GR_EP0_SUSPEND)
		return -EBUSY;

	spin_lock_irqsave(&dev->lock, flags);

	/* Make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
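
	/*
	 * If the loop ran to completion without a match, req points at a
	 * bogus entry computed from the list head, so comparing against
	 * _req detects the miss.
	 */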
	if (&req->req != _req) {
		ret = -EINVAL;
		goto out;
	}

	if (list_first_entry(&ep->queue, struct gr_request, queue) == req) {
		/* This request is currently being processed */
		gr_abort_dma(ep);
		if (ep->stopped)
			gr_finish_request(ep, req, -ECONNRESET);
		else
			gr_dma_advance(ep, -ECONNRESET);
	} else if (!list_empty(&req->queue)) {
		/* Not being processed - gr_finish_request dequeues it */
		gr_finish_request(ep, req, -ECONNRESET);
	} else {
		ret = -EOPNOTSUPP;
	}

out:
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}

/* Helper for gr_set_halt and gr_set_wedge */
static int gr_set_halt_wedge(struct usb_ep *_ep, int halt, int wedge)
{
	int ret;
	struct gr_ep *ep;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	spin_lock(&ep->dev->lock);

	/* Halting an IN endpoint should fail if queue is not empty */
	if (halt && ep->is_in && !list_empty(&ep->queue)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = gr_ep_halt_wedge(ep, halt, wedge, 0);

out:
	spin_unlock(&ep->dev->lock);

	return ret;
}
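
/*
 * The two wrappers below back usb_ep_set_halt() and usb_ep_set_wedge(). A
 * wedged endpoint stays halted even if the host clears the halt feature;
 * only the gadget driver can un-wedge it.
 */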

/* Halt endpoint */
static int gr_set_halt(struct usb_ep *_ep, int halt)
{
	return gr_set_halt_wedge(_ep, halt, 0);
}

/* Halt and wedge endpoint */
static int gr_set_wedge(struct usb_ep *_ep)
{
	return gr_set_halt_wedge(_ep, 1, 1);
}

/*
 * Return the total number of bytes currently stored in the internal buffers of
 * the endpoint.
 */
static int gr_fifo_status(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epstat;
	u32 bytes = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct gr_ep, ep);

	epstat = gr_read32(&ep->regs->epstat);

	if (epstat & GR_EPSTAT_B0)
		bytes += (epstat & GR_EPSTAT_B0CNT_MASK) >> GR_EPSTAT_B0CNT_POS;
	if (epstat & GR_EPSTAT_B1)
		bytes += (epstat & GR_EPSTAT_B1CNT_MASK) >> GR_EPSTAT_B1CNT_POS;

	return bytes;
}

/* Empty data from internal buffers of an endpoint. */
static void gr_fifo_flush(struct usb_ep *_ep)
{
	struct gr_ep *ep;
	u32 epctrl;

	if (!_ep)
		return;
	ep = container_of(_ep, struct gr_ep, ep);
	dev_vdbg(ep->dev->dev, "EP: flush fifo %s\n", ep->ep.name);

	spin_lock(&ep->dev->lock);

	epctrl = gr_read32(&ep->regs->epctrl);
	epctrl |= GR_EPCTRL_CB;
	gr_write32(&ep->regs->epctrl, epctrl);

	spin_unlock(&ep->dev->lock);
}

static const struct usb_ep_ops gr_ep_ops = {
	.enable		= gr_ep_enable,
	.disable	= gr_ep_disable,

	.alloc_request	= gr_alloc_request,
	.free_request	= gr_free_request,

	.queue		= gr_queue_ext,
	.dequeue	= gr_dequeue,

	.set_halt	= gr_set_halt,
	.set_wedge	= gr_set_wedge,
	.fifo_status	= gr_fifo_status,
	.fifo_flush	= gr_fifo_flush,
};

/* ---------------------------------------------------------------------- */
/* USB Gadget ops */

static int gr_get_frame(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);
	return gr_read32(&dev->regs->status) & GR_STATUS_FN_MASK;
}

static int gr_wakeup(struct usb_gadget *_gadget)
{
	struct gr_udc *dev;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	/* Remote wakeup feature not enabled by host */
	if (!dev->remote_wakeup)
		return -EINVAL;

	spin_lock(&dev->lock);

	gr_write32(&dev->regs->control,
		   gr_read32(&dev->regs->control) | GR_CONTROL_RW);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct gr_udc *dev;
	u32 control;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct gr_udc, gadget);

	spin_lock(&dev->lock);

	control = gr_read32(&dev->regs->control);
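
	/*
	 * The gadget core calls .pullup to connect to or disconnect from
	 * the host; here that maps onto the EP bit of the control register.
	 */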
	if (is_on)
		control |= GR_CONTROL_EP;
	else
		control &= ~GR_CONTROL_EP;
	gr_write32(&dev->regs->control, control);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_start(struct usb_gadget *gadget,
			struct usb_gadget_driver *driver)
{
	struct gr_udc *dev = to_gr_udc(gadget);

	spin_lock(&dev->lock);

	/* Hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Get ready for host detection */
	gr_enable_vbus_detect(dev);

	spin_unlock(&dev->lock);

	return 0;
}

static int gr_udc_stop(struct usb_gadget *gadget)
{
	struct gr_udc *dev = to_gr_udc(gadget);
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);

	dev->driver = NULL;
	gr_stop_activity(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static const struct usb_gadget_ops gr_ops = {
	.get_frame	= gr_get_frame,
	.wakeup		= gr_wakeup,
	.pullup		= gr_pullup,
	.udc_start	= gr_udc_start,
	.udc_stop	= gr_udc_stop,
	/* Other operations not supported */
};

/* ---------------------------------------------------------------------- */
/* Module probe, removal and of-matching */

static const char * const onames[] = {
	"ep0out", "ep1out", "ep2out", "ep3out", "ep4out", "ep5out",
	"ep6out", "ep7out", "ep8out", "ep9out", "ep10out", "ep11out",
	"ep12out", "ep13out", "ep14out", "ep15out"
};

static const char * const inames[] = {
	"ep0in", "ep1in", "ep2in", "ep3in", "ep4in", "ep5in",
	"ep6in", "ep7in", "ep8in", "ep9in", "ep10in", "ep11in",
	"ep12in", "ep13in", "ep14in", "ep15in"
};

/* Must be called with dev->lock held */
static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
{
	struct gr_ep *ep;
	struct gr_request *req;
	struct usb_request *_req;
	void *buf;

	if (is_in) {
		ep = &dev->epi[num];
		ep->ep.name = inames[num];
		ep->regs = &dev->regs->epi[num];
	} else {
		ep = &dev->epo[num];
		ep->ep.name = onames[num];
		ep->regs = &dev->regs->epo[num];
	}

	gr_ep_reset(ep);
	ep->num = num;
	ep->is_in = is_in;
	ep->dev = dev;
	ep->ep.ops = &gr_ep_ops;
	INIT_LIST_HEAD(&ep->queue);
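
	/*
	 * ep0 gets a permanent, pre-allocated request and buffer for the
	 * control transfer machinery; all other endpoints are exposed to
	 * gadget drivers on the gadget's ep_list.
	 */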
	if (num == 0) {
		_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
		buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
		if (!_req || !buf) {
			/* possible _req freed by gr_probe via gr_remove */
			return -ENOMEM;
		}

		req = container_of(_req, struct gr_request, req);
		req->req.buf = buf;
		req->req.length = MAX_CTRL_PL_SIZE;

		if (is_in)
			dev->ep0reqi = req; /* Complete gets set as used */
		else
			dev->ep0reqo = req; /* Completion treated separately */

		usb_ep_set_maxpacket_limit(&ep->ep, MAX_CTRL_PL_SIZE);
		ep->bytes_per_buffer = MAX_CTRL_PL_SIZE;

		ep->ep.caps.type_control = true;
	} else {
		usb_ep_set_maxpacket_limit(&ep->ep, (u16)maxplimit);
		list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);

		ep->ep.caps.type_iso = true;
		ep->ep.caps.type_bulk = true;
		ep->ep.caps.type_int = true;
	}
	list_add_tail(&ep->ep_list, &dev->ep_list);

	if (is_in)
		ep->ep.caps.dir_in = true;
	else
		ep->ep.caps.dir_out = true;

	ep->tailbuf = dma_alloc_coherent(dev->dev, ep->ep.maxpacket_limit,
					 &ep->tailbuf_paddr, GFP_ATOMIC);
	if (!ep->tailbuf)
		return -ENOMEM;

	return 0;
}

/* Must be called with dev->lock held */
static int gr_udc_init(struct gr_udc *dev)
{
	struct device_node *np = dev->dev->of_node;
	u32 epctrl_val;
	u32 dmactrl_val;
	int i;
	int ret = 0;
	u32 bufsize;

	gr_set_address(dev, 0);

	INIT_LIST_HEAD(&dev->gadget.ep_list);
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->gadget.ep0 = &dev->epi[0].ep;

	INIT_LIST_HEAD(&dev->ep_list);
	gr_set_ep0state(dev, GR_EP0_DISCONNECT);
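
	/*
	 * Per-endpoint buffer sizes come from the optional "epobufsizes"
	 * and "epibufsizes" device tree properties, e.g. (illustrative
	 * values only):
	 *
	 *	epobufsizes = <1024 1024 512>;
	 *	epibufsizes = <1024 1024 512>;
	 *
	 * Endpoints without an entry fall back to 1024 bytes.
	 */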
	for (i = 0; i < dev->nepo; i++) {
		if (of_property_read_u32_index(np, "epobufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 0, bufsize);
		if (ret)
			return ret;
	}

	for (i = 0; i < dev->nepi; i++) {
		if (of_property_read_u32_index(np, "epibufsizes", i, &bufsize))
			bufsize = 1024;
		ret = gr_ep_init(dev, i, 1, bufsize);
		if (ret)
			return ret;
	}

	/* Must be disabled by default */
	dev->remote_wakeup = 0;

	/* Enable ep0out and ep0in */
	epctrl_val = (MAX_CTRL_PL_SIZE << GR_EPCTRL_MAXPL_POS) | GR_EPCTRL_EV;
	dmactrl_val = GR_DMACTRL_IE | GR_DMACTRL_AI;
	gr_write32(&dev->epo[0].regs->epctrl, epctrl_val);
	gr_write32(&dev->epi[0].regs->epctrl, epctrl_val | GR_EPCTRL_PI);
	gr_write32(&dev->epo[0].regs->dmactrl, dmactrl_val);
	gr_write32(&dev->epi[0].regs->dmactrl, dmactrl_val);

	return 0;
}

static void gr_ep_remove(struct gr_udc *dev, int num, int is_in)
{
	struct gr_ep *ep;

	if (is_in)
		ep = &dev->epi[num];
	else
		ep = &dev->epo[num];

	if (ep->tailbuf)
		dma_free_coherent(dev->dev, ep->ep.maxpacket_limit,
				  ep->tailbuf, ep->tailbuf_paddr);
}

static int gr_remove(struct platform_device *pdev)
{
	struct gr_udc *dev = platform_get_drvdata(pdev);
	int i;

	if (dev->added)
		usb_del_gadget_udc(&dev->gadget); /* Shuts everything down */
	if (dev->driver)
		return -EBUSY;

	gr_dfs_delete(dev);
	dma_pool_destroy(dev->desc_pool);
	platform_set_drvdata(pdev, NULL);

	gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
	gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req);

	for (i = 0; i < dev->nepo; i++)
		gr_ep_remove(dev, i, 0);
	for (i = 0; i < dev->nepi; i++)
		gr_ep_remove(dev, i, 1);

	return 0;
}

static int gr_request_irq(struct gr_udc *dev, int irq)
{
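	/*
	 * gr_irq runs in hard interrupt context and wakes the threaded
	 * handler gr_irq_handler for the actual event processing.
	 */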
	return devm_request_threaded_irq(dev->dev, irq, gr_irq, gr_irq_handler,
					 IRQF_SHARED, driver_name, dev);
}

static int gr_probe(struct platform_device *pdev)
{
	struct gr_udc *dev;
	struct resource *res;
	struct gr_regs __iomem *regs;
	int retval;
	u32 status;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq <= 0) {
		dev_err(dev->dev, "No irq found\n");
		return -ENODEV;
	}

	/* Some core configurations have separate irqs for IN and OUT events */
	dev->irqi = platform_get_irq(pdev, 1);
	if (dev->irqi > 0) {
		dev->irqo = platform_get_irq(pdev, 2);
		if (dev->irqo <= 0) {
			dev_err(dev->dev, "Found irqi but not irqo\n");
			return -ENODEV;
		}
	} else {
		dev->irqi = 0;
	}

	dev->gadget.name = driver_name;
	dev->gadget.max_speed = USB_SPEED_HIGH;
	dev->gadget.ops = &gr_ops;

	spin_lock_init(&dev->lock);
	dev->regs = regs;

	platform_set_drvdata(pdev, dev);

	/* Determine number of endpoints and data interface mode */
	status = gr_read32(&dev->regs->status);
	dev->nepi = ((status & GR_STATUS_NEPI_MASK) >> GR_STATUS_NEPI_POS) + 1;
	dev->nepo = ((status & GR_STATUS_NEPO_MASK) >> GR_STATUS_NEPO_POS) + 1;
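
	/*
	 * The DM status bit tells a DMA ("master") mode core apart from a
	 * slave mode one; only DMA mode is supported, as checked below.
	 */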
	if (!(status & GR_STATUS_DM)) {
		dev_err(dev->dev, "Slave mode cores are not supported\n");
		return -ENODEV;
	}

	/* --- Effects of the following calls might need explicit cleanup --- */

	/* Create DMA pool for descriptors */
	dev->desc_pool = dma_pool_create("desc_pool", dev->dev,
					 sizeof(struct gr_dma_desc), 4, 0);
	if (!dev->desc_pool) {
		dev_err(dev->dev, "Could not allocate DMA pool");
		return -ENOMEM;
	}

	/* From this point on, the gadget core may bind a driver to this udc */
	retval = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (retval) {
		dev_err(dev->dev, "Could not add gadget udc");
		goto out;
	}
	dev->added = 1;

	spin_lock(&dev->lock);

	retval = gr_udc_init(dev);
	if (retval) {
		spin_unlock(&dev->lock);
		goto out;
	}

	/* Clear all interrupt enables that might be left on since last boot */
	gr_disable_interrupts_and_pullup(dev);

	spin_unlock(&dev->lock);

	gr_dfs_create(dev);

	retval = gr_request_irq(dev, dev->irq);
	if (retval) {
		dev_err(dev->dev, "Failed to request irq %d\n", dev->irq);
		goto out;
	}

	if (dev->irqi) {
		retval = gr_request_irq(dev, dev->irqi);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqi %d\n",
				dev->irqi);
			goto out;
		}
		retval = gr_request_irq(dev, dev->irqo);
		if (retval) {
			dev_err(dev->dev, "Failed to request irqo %d\n",
				dev->irqo);
			goto out;
		}
	}

	if (dev->irqi)
		dev_info(dev->dev, "regs: %p, irqs %d, %d, %d\n", dev->regs,
			 dev->irq, dev->irqi, dev->irqo);
	else
		dev_info(dev->dev, "regs: %p, irq %d\n", dev->regs, dev->irq);

out:
	if (retval)
		gr_remove(pdev);

	return retval;
}

static const struct of_device_id gr_match[] = {
	{.name = "GAISLER_USBDC"},
	{.name = "01_021"},
	{},
};
MODULE_DEVICE_TABLE(of, gr_match);

static struct platform_driver gr_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = gr_match,
	},
	.probe = gr_probe,
	.remove = gr_remove,
};
module_platform_driver(gr_driver);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");