oxu210hp-hcd.c

/*
 * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
 * Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
 *
 * This code is *strongly* based on EHCI-HCD code by David Brownell since
 * the chip is a quasi-EHCI compatible.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include <linux/irq.h>
#include <linux/platform_device.h>

#include "oxu210hp.h"

#define DRIVER_VERSION "0.0.50"
/*
 * Main defines
 */

#define oxu_dbg(oxu, fmt, args...) \
		dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_err(oxu, fmt, args...) \
		dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_info(oxu, fmt, args...) \
		dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)

#ifdef CONFIG_DYNAMIC_DEBUG
#define DEBUG
#endif
static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
	return container_of((void *) oxu, struct usb_hcd, hcd_priv);
}

static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
{
	return (struct oxu_hcd *) (hcd->hcd_priv);
}
/*
 * Debug stuff
 */

#undef OXU_URB_TRACE
#undef OXU_VERBOSE_DEBUG

#ifdef OXU_VERBOSE_DEBUG
#define oxu_vdbg			oxu_dbg
#else
#define oxu_vdbg(oxu, fmt, args...)	/* Nop */
#endif

#ifdef DEBUG

static int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
	return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
		label, label[0] ? " " : "", status,
		(status & STS_ASS) ? " Async" : "",
		(status & STS_PSS) ? " Periodic" : "",
		(status & STS_RECL) ? " Recl" : "",
		(status & STS_HALT) ? " Halt" : "",
		(status & STS_IAA) ? " IAA" : "",
		(status & STS_FATAL) ? " FATAL" : "",
		(status & STS_FLR) ? " FLR" : "",
		(status & STS_PCD) ? " PCD" : "",
		(status & STS_ERR) ? " ERR" : "",
		(status & STS_INT) ? " INT" : ""
		);
}

static int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
	return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
		label, label[0] ? " " : "", enable,
		(enable & STS_IAA) ? " IAA" : "",
		(enable & STS_FATAL) ? " FATAL" : "",
		(enable & STS_FLR) ? " FLR" : "",
		(enable & STS_PCD) ? " PCD" : "",
		(enable & STS_ERR) ? " ERR" : "",
		(enable & STS_INT) ? " INT" : ""
		);
}

static const char *const fls_strings[] =
	{ "1024", "512", "256", "??" };

static int dbg_command_buf(char *buf, unsigned len,
			   const char *label, u32 command)
{
	return scnprintf(buf, len,
		"%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
		label, label[0] ? " " : "", command,
		(command & CMD_PARK) ? "park" : "(park)",
		CMD_PARK_CNT(command),
		(command >> 16) & 0x3f,
		(command & CMD_LRESET) ? " LReset" : "",
		(command & CMD_IAAD) ? " IAAD" : "",
		(command & CMD_ASE) ? " Async" : "",
		(command & CMD_PSE) ? " Periodic" : "",
		fls_strings[(command >> 2) & 0x3],
		(command & CMD_RESET) ? " Reset" : "",
		(command & CMD_RUN) ? "RUN" : "HALT"
		);
}

static int dbg_port_buf(char *buf, unsigned len, const char *label,
			int port, u32 status)
{
	char *sig;

	/* signaling state */
	switch (status & (3 << 10)) {
	case 0 << 10:
		sig = "se0";
		break;
	case 1 << 10:
		sig = "k";	/* low speed */
		break;
	case 2 << 10:
		sig = "j";
		break;
	default:
		sig = "?";
		break;
	}

	return scnprintf(buf, len,
		"%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
		label, label[0] ? " " : "", port, status,
		(status & PORT_POWER) ? " POWER" : "",
		(status & PORT_OWNER) ? " OWNER" : "",
		sig,
		(status & PORT_RESET) ? " RESET" : "",
		(status & PORT_SUSPEND) ? " SUSPEND" : "",
		(status & PORT_RESUME) ? " RESUME" : "",
		(status & PORT_OCC) ? " OCC" : "",
		(status & PORT_OC) ? " OC" : "",
		(status & PORT_PEC) ? " PEC" : "",
		(status & PORT_PE) ? " PE" : "",
		(status & PORT_CSC) ? " CSC" : "",
		(status & PORT_CONNECT) ? " CONNECT" : ""
		);
}

#else

static inline int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{ return 0; }

static inline int __attribute__((__unused__))
dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }

#endif /* DEBUG */
/* functions have the "wrong" filename when they're output... */
#define dbg_status(oxu, label, status) { \
	char _buf[80]; \
	dbg_status_buf(_buf, sizeof _buf, label, status); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

#define dbg_cmd(oxu, label, command) { \
	char _buf[80]; \
	dbg_command_buf(_buf, sizeof _buf, label, command); \
	oxu_dbg(oxu, "%s\n", _buf); \
}

#define dbg_port(oxu, label, port, status) { \
	char _buf[80]; \
	dbg_port_buf(_buf, sizeof _buf, label, port, status); \
	oxu_dbg(oxu, "%s\n", _buf); \
}
/*
 * Module parameters
 */

/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh;			/* 0 to 6 */
module_param(log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");

/* Initial park setting: slower than hw default */
static unsigned park;
module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");

/* For flakey hardware, ignore overcurrent indicators */
static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");

static void ehci_work(struct oxu_hcd *oxu);
static int oxu_hub_control(struct usb_hcd *hcd,
				u16 typeReq, u16 wValue, u16 wIndex,
				char *buf, u16 wLength);
/*
 * Local functions
 */

/* Low level read/write registers functions */
static inline u32 oxu_readl(void *base, u32 reg)
{
	return readl(base + reg);
}

static inline void oxu_writel(void *base, u32 reg, u32 val)
{
	writel(val, base + reg);
}

static inline void timer_action_done(struct oxu_hcd *oxu,
					enum ehci_timer_action action)
{
	clear_bit(action, &oxu->actions);
}
static inline void timer_action(struct oxu_hcd *oxu,
				enum ehci_timer_action action)
{
	if (!test_and_set_bit(action, &oxu->actions)) {
		unsigned long t;

		switch (action) {
		case TIMER_IAA_WATCHDOG:
			t = EHCI_IAA_JIFFIES;
			break;
		case TIMER_IO_WATCHDOG:
			t = EHCI_IO_JIFFIES;
			break;
		case TIMER_ASYNC_OFF:
			t = EHCI_ASYNC_JIFFIES;
			break;
		case TIMER_ASYNC_SHRINK:
		default:
			t = EHCI_SHRINK_JIFFIES;
			break;
		}
		t += jiffies;
		/* all timings except IAA watchdog can be overridden.
		 * async queue SHRINK often precedes IAA.  while it's ready
		 * to go OFF neither can matter, and afterwards the IO
		 * watchdog stops unless there's still periodic traffic.
		 */
		if (action != TIMER_IAA_WATCHDOG
				&& t > oxu->watchdog.expires
				&& timer_pending(&oxu->watchdog))
			return;
		mod_timer(&oxu->watchdog, t);
	}
}
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" microseconds have
 * passed (major hardware flakeout), or the register reads as all-ones
 * (hardware removed).
 *
 * That last failure should only happen in cases like physical cardbus eject
 * before driver shutdown.  But it also seems to be caused by bugs in cardbus
 * bridge shutdown: shutting down the bridge before the devices using it.
 */
static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
					u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);

	return -ETIMEDOUT;
}
/* Force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt(struct oxu_hcd *oxu)
{
	u32 temp = readl(&oxu->regs->status);

	/* disable any irqs left enabled by previous code */
	writel(0, &oxu->regs->intr_enable);

	if ((temp & STS_HALT) != 0)
		return 0;

	temp = readl(&oxu->regs->command);
	temp &= ~CMD_RUN;
	writel(temp, &oxu->regs->command);

	return handshake(oxu, &oxu->regs->status,
			  STS_HALT, STS_HALT, 16 * 125);
}

/* Put TDI/ARC silicon into EHCI mode */
static void tdi_reset(struct oxu_hcd *oxu)
{
	u32 __iomem *reg_ptr;
	u32 tmp;

	reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
	tmp = readl(reg_ptr);
	tmp |= 0x3;
	writel(tmp, reg_ptr);
}
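
/*
 * Explanatory note (not from the original sources): on TDI/ARC-derived EHCI
 * cores, offset 0x68 from the operational register base appears to be the
 * USBMODE register; setting the low two bits to 0x3 selects host-controller
 * mode, which is what "EHCI mode" means here.
 */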
/* Reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct oxu_hcd *oxu)
{
	int retval;
	u32 command = readl(&oxu->regs->command);

	command |= CMD_RESET;
	dbg_cmd(oxu, "reset", command);
	writel(command, &oxu->regs->command);
	oxu_to_hcd(oxu)->state = HC_STATE_HALT;
	oxu->next_statechange = jiffies;
	retval = handshake(oxu, &oxu->regs->command,
			    CMD_RESET, 0, 250 * 1000);
	if (retval)
		return retval;

	tdi_reset(oxu);

	return retval;
}
/* Idle the controller (from running) */
static void ehci_quiesce(struct oxu_hcd *oxu)
{
	u32 temp;

#ifdef DEBUG
	if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		BUG();
#endif

	/* wait for any schedule enables/disables to take effect */
	temp = readl(&oxu->regs->command) << 10;
	temp &= STS_ASS | STS_PSS;
	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
				temp, 16 * 125) != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return;
	}

	/* then disable anything that's still active */
	temp = readl(&oxu->regs->command);
	temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
	writel(temp, &oxu->regs->command);

	/* hardware can take 16 microframes to turn off ... */
	if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
				0, 16 * 125) != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		return;
	}
}
static int check_reset_complete(struct oxu_hcd *oxu, int index,
				u32 __iomem *status_reg, int port_status)
{
	if (!(port_status & PORT_CONNECT)) {
		oxu->reset_done[index] = 0;
		return port_status;
	}

	/* if reset finished and it's still not enabled -- handoff */
	if (!(port_status & PORT_PE)) {
		oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
				index+1);
		return port_status;
	} else
		oxu_dbg(oxu, "port %d high speed\n", index + 1);

	return port_status;
}

static void ehci_hub_descriptor(struct oxu_hcd *oxu,
				struct usb_hub_descriptor *desc)
{
	int ports = HCS_N_PORTS(oxu->hcs_params);
	u16 temp;

	desc->bDescriptorType = USB_DT_HUB;
	desc->bPwrOn2PwrGood = 10;	/* oxu 1.0, 2.3.9 says 20ms max */
	desc->bHubContrCurrent = 0;

	desc->bNbrPorts = ports;
	temp = 1 + (ports / 8);
	desc->bDescLength = 7 + 2 * temp;

	/* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
	memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
	memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);

	temp = HUB_CHAR_INDV_PORT_OCPM;	/* per-port overcurrent reporting */
	if (HCS_PPC(oxu->hcs_params))
		temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */
	else
		temp |= HUB_CHAR_NO_LPSM; /* no power switching */
	desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
}
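
/*
 * Sizing example (explanatory, not from the original sources): with an
 * illustrative ports = 3, temp = 1 + 3/8 = 1, so bDescLength = 7 + 2*1 = 9:
 * one zeroed DeviceRemovable byte followed by one 0xff byte for the legacy
 * PortPwrCtrlMask.
 */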
/* Allocate an OXU210HP on-chip memory data buffer
 *
 * An on-chip memory data buffer is required for each OXU210HP USB transfer.
 * Each transfer descriptor has one or more on-chip memory data buffers.
 *
 * Data buffers are allocated from a fixed-size pool of data blocks.
 * To minimise fragmentation and give reasonable memory utilisation,
 * data buffers are allocated with sizes that are power-of-2 multiples of
 * the block size, starting at an address that is a multiple of the
 * allocated size.
 *
 * FIXME: callers of this function require a buffer to be allocated for
 * len=0. This is a waste of on-chip memory and should be fixed. Then this
 * function should be changed to not allocate a buffer for len=0.
 */
static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
{
	int n_blocks;	/* minimum blocks needed to hold len */
	int a_blocks;	/* blocks allocated */
	int i, j;

	/* Don't allocate bigger than supported */
	if (len > BUFFER_SIZE * BUFFER_NUM) {
		oxu_err(oxu, "buffer too big (%d)\n", len);
		return -ENOMEM;
	}

	spin_lock(&oxu->mem_lock);

	/* Number of blocks needed to hold len */
	n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;

	/* Round the number of blocks up to the power of 2 */
	for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
		;

	/* Find a suitable available data buffer */
	for (i = 0; i < BUFFER_NUM;
			i += max(a_blocks, (int)oxu->db_used[i])) {

		/* Check all the required blocks are available */
		for (j = 0; j < a_blocks; j++)
			if (oxu->db_used[i + j])
				break;

		if (j != a_blocks)
			continue;

		/* Allocate blocks found! */
		qtd->buffer = (void *) &oxu->mem->db_pool[i];
		qtd->buffer_dma = virt_to_phys(qtd->buffer);

		qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
		oxu->db_used[i] = a_blocks;

		spin_unlock(&oxu->mem_lock);

		return 0;
	}

	/* Failed */
	spin_unlock(&oxu->mem_lock);

	return -ENOMEM;
}
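
/*
 * Allocation example (explanatory, not from the original sources): a request
 * for len = 3 * BUFFER_SIZE + 1 needs n_blocks = 4, already a power of two,
 * so a_blocks = 4 contiguous blocks are reserved starting at an index that is
 * a multiple of 4; oxu->db_used[i] = 4 records the whole run so that
 * oxu_buf_free() can release it from the buffer pointer alone.
 */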
static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
							/ BUFFER_SIZE;
	oxu->db_used[index] = 0;

	qtd->qtd_buffer_len = 0;
	qtd->buffer_dma = 0;
	qtd->buffer = NULL;

	spin_unlock(&oxu->mem_lock);
}

static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
{
	memset(qtd, 0, sizeof *qtd);
	qtd->qtd_dma = dma;
	qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
	qtd->hw_next = EHCI_LIST_END;
	qtd->hw_alt_next = EHCI_LIST_END;
	INIT_LIST_HEAD(&qtd->qtd_list);
}

static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
	int index;

	if (qtd->buffer)
		oxu_buf_free(oxu, qtd);

	spin_lock(&oxu->mem_lock);

	index = qtd - &oxu->mem->qtd_pool[0];
	oxu->qtd_used[index] = 0;

	spin_unlock(&oxu->mem_lock);
}

static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct ehci_qtd *qtd = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < QTD_NUM; i++)
		if (!oxu->qtd_used[i])
			break;

	if (i < QTD_NUM) {
		qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
		memset(qtd, 0, sizeof *qtd);

		qtd->hw_token = cpu_to_le32(QTD_STS_HALT);
		qtd->hw_next = EHCI_LIST_END;
		qtd->hw_alt_next = EHCI_LIST_END;
		INIT_LIST_HEAD(&qtd->qtd_list);

		qtd->qtd_dma = virt_to_phys(qtd);

		oxu->qtd_used[i] = 1;
	}

	spin_unlock(&oxu->mem_lock);

	return qtd;
}
static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = qh - &oxu->mem->qh_pool[0];
	oxu->qh_used[index] = 0;

	spin_unlock(&oxu->mem_lock);
}

static void qh_destroy(struct kref *kref)
{
	struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
	struct oxu_hcd *oxu = qh->oxu;

	/* clean qtds first, and know this is not linked */
	if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
		oxu_dbg(oxu, "unused qh not empty!\n");
		BUG();
	}
	if (qh->dummy)
		oxu_qtd_free(oxu, qh->dummy);
	oxu_qh_free(oxu, qh);
}

static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct ehci_qh *qh = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < QHEAD_NUM; i++)
		if (!oxu->qh_used[i])
			break;

	if (i < QHEAD_NUM) {
		qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];
		memset(qh, 0, sizeof *qh);

		kref_init(&qh->kref);
		qh->oxu = oxu;
		qh->qh_dma = virt_to_phys(qh);
		INIT_LIST_HEAD(&qh->qtd_list);

		/* dummy td enables safe urb queuing */
		qh->dummy = ehci_qtd_alloc(oxu);
		if (qh->dummy == NULL) {
			oxu_dbg(oxu, "no dummy td\n");
			oxu->qh_used[i] = 0;
			qh = NULL;
			goto unlock;
		}

		oxu->qh_used[i] = 1;
	}
unlock:
	spin_unlock(&oxu->mem_lock);

	return qh;
}
/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
{
	kref_get(&qh->kref);
	return qh;
}

static inline void qh_put(struct ehci_qh *qh)
{
	kref_put(&qh->kref, qh_destroy);
}

static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
{
	int index;

	spin_lock(&oxu->mem_lock);

	index = murb - &oxu->murb_pool[0];
	oxu->murb_used[index] = 0;

	spin_unlock(&oxu->mem_lock);
}

static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
{
	int i;
	struct oxu_murb *murb = NULL;

	spin_lock(&oxu->mem_lock);

	for (i = 0; i < MURB_NUM; i++)
		if (!oxu->murb_used[i])
			break;

	if (i < MURB_NUM) {
		murb = &(oxu->murb_pool)[i];
		oxu->murb_used[i] = 1;
	}

	spin_unlock(&oxu->mem_lock);

	return murb;
}
/* The queue heads and transfer descriptors are managed from pools tied
 * to each of the "per device" structures.
 * This is the initialisation and cleanup code.
 */
static void ehci_mem_cleanup(struct oxu_hcd *oxu)
{
	kfree(oxu->murb_pool);
	oxu->murb_pool = NULL;

	if (oxu->async)
		qh_put(oxu->async);
	oxu->async = NULL;

	del_timer(&oxu->urb_timer);

	oxu->periodic = NULL;

	/* shadow periodic table */
	kfree(oxu->pshadow);
	oxu->pshadow = NULL;
}

/* Remember to add cleanup code (above) if you add anything here.
 */
static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
{
	int i;

	for (i = 0; i < oxu->periodic_size; i++)
		oxu->mem->frame_list[i] = EHCI_LIST_END;
	for (i = 0; i < QHEAD_NUM; i++)
		oxu->qh_used[i] = 0;
	for (i = 0; i < QTD_NUM; i++)
		oxu->qtd_used[i] = 0;

	oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
	if (!oxu->murb_pool)
		goto fail;

	for (i = 0; i < MURB_NUM; i++)
		oxu->murb_used[i] = 0;

	oxu->async = oxu_qh_alloc(oxu);
	if (!oxu->async)
		goto fail;

	oxu->periodic = (__le32 *) &oxu->mem->frame_list;
	oxu->periodic_dma = virt_to_phys(oxu->periodic);

	for (i = 0; i < oxu->periodic_size; i++)
		oxu->periodic[i] = EHCI_LIST_END;

	/* software shadow of hardware table */
	oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
	if (oxu->pshadow != NULL)
		return 0;

fail:
	oxu_dbg(oxu, "couldn't init memory\n");
	ehci_mem_cleanup(oxu);
	return -ENOMEM;
}
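
/*
 * Layout note (explanatory, not from the original sources): the frame list
 * and the qh/qtd/data-buffer pools all live inside oxu->mem, i.e. in the
 * controller's on-chip memory (hence the virt_to_phys() calls instead of a
 * DMA pool), while murb_pool and the pshadow table are ordinary kernel
 * allocations used only by software.
 */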
/* Fill a qtd, returning how much of the buffer we were able to queue up.
 */
static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
				int token, int maxpacket)
{
	int i, count;
	u64 addr = buf;

	/* one buffer entry per 4K ... first might be short or unaligned */
	qtd->hw_buf[0] = cpu_to_le32((u32)addr);
	qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
	count = 0x1000 - (buf & 0x0fff);	/* rest of that page */
	if (likely(len < count))		/* ... iff needed */
		count = len;
	else {
		buf +=  0x1000;
		buf &= ~0x0fff;

		/* per-qtd limit: from 16K to 20K (best alignment) */
		for (i = 1; count < len && i < 5; i++) {
			addr = buf;
			qtd->hw_buf[i] = cpu_to_le32((u32)addr);
			qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
			buf += 0x1000;
			if ((count + 0x1000) < len)
				count += 0x1000;
			else
				count = len;
		}

		/* short packets may only terminate transfers */
		if (count != len)
			count -= (count % maxpacket);
	}
	qtd->hw_token = cpu_to_le32((count << 16) | token);
	qtd->length = count;

	return count;
}
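
/*
 * Worked example (explanatory, not from the original sources): for a buffer
 * starting 0x200 bytes into a page with len = 8192, the first entry covers
 * the remaining 0xe00 bytes of that page and the loop adds two page-aligned
 * entries, so count reaches 8192 using three of the five hw_buf slots; a
 * fully page-aligned buffer could instead map the per-qtd maximum of 20K.
 */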
static inline void qh_update(struct oxu_hcd *oxu,
				struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);

	qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
	qh->hw_alt_next = EHCI_LIST_END;

	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
	 * ever clear it.
	 */
	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
		unsigned is_out, epnum;

		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
		if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
			qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
			usb_settoggle(qh->dev, epnum, is_out, 1);
		}
	}

	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb();
	qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
}

/* If it weren't for a common silicon quirk (writing the dummy into the qh
 * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
 * recovery (including urb dequeue) would need software changes to a QH...
 */
static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *qtd;

	if (list_empty(&qh->qtd_list))
		qtd = qh->dummy;
	else {
		qtd = list_entry(qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* first qtd may already be partially processed */
		if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current)
			qtd = NULL;
	}

	if (qtd)
		qh_update(oxu, qh, qtd);
}
static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
				size_t length, u32 token)
{
	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely(QTD_PID(token) != 2))
		urb->actual_length += length - QTD_LENGTH(token);

	/* don't modify error codes */
	if (unlikely(urb->status != -EINPROGRESS))
		return;

	/* force cleanup after short read; not always an error */
	if (unlikely(IS_SHORT_READ(token)))
		urb->status = -EREMOTEIO;

	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			urb->status = -EOVERFLOW;
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			urb->status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			urb->status = (QTD_PID(token) == 1) /* IN ? */
				? -ENOSR  /* hc couldn't read data */
				: -ECOMM; /* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad crc, wrong PID, etc; retried */
			if (QTD_CERR(token))
				urb->status = -EPIPE;
			else {
				oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
					urb->dev->devpath,
					usb_pipeendpoint(urb->pipe),
					usb_pipein(urb->pipe) ? "in" : "out");
				urb->status = -EPROTO;
			}
		/* CERR nonzero + no errors + halt --> stall */
		} else if (QTD_CERR(token))
			urb->status = -EPIPE;
		else	/* unknown */
			urb->status = -EPROTO;

		oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			token, urb->status);
	}
}
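
/*
 * Quick reference for the mapping above (explanatory, not from the original
 * sources): babble -> -EOVERFLOW, missed microframe -> -EPROTO, data buffer
 * error -> -ENOSR (IN) or -ECOMM (OUT), transaction error with retries left
 * -> -EPIPE, retries exhausted -> -EPROTO, plain halt with CERR still set
 * -> -EPIPE (stall).
 */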
static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
__releases(oxu->lock)
__acquires(oxu->lock)
{
	if (likely(urb->hcpriv != NULL)) {
		struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;

		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {

			/* ... update hc-wide periodic stats (for usbfs) */
			oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
		}
		qh_put(qh);
	}

	urb->hcpriv = NULL;
	switch (urb->status) {
	case -EINPROGRESS:		/* success */
		urb->status = 0;
	default:			/* fault */
		break;
	case -EREMOTEIO:		/* fault or normal */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			urb->status = 0;
		break;
	case -ECONNRESET:		/* canceled */
	case -ENOENT:
		break;
	}

#ifdef OXU_URB_TRACE
	oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->status,
		urb->actual_length, urb->transfer_buffer_length);
#endif

	/* complete() can reenter this HCD */
	spin_unlock(&oxu->lock);
	usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
	spin_lock(&oxu->lock);
}
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);

static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);

#define HALT_BIT cpu_to_le32(QTD_STS_HALT)

/* Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current. Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *last = NULL, *end = qh->dummy;
	struct list_head *entry, *tmp;
	int stopped;
	unsigned count = 0;
	int do_status = 0;
	u8 state;
	struct oxu_murb *murb = NULL;

	if (unlikely(list_empty(&qh->qtd_list)))
		return count;

	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE: unlinking expects to be done in queue order.
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);

	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe(entry, tmp, &qh->qtd_list) {
		struct ehci_qtd *qtd;
		struct urb *urb;
		u32 token = 0;

		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

		/* Clean up any state from previous QTD ...*/
		if (last) {
			if (likely(last->urb != urb)) {
				if (last->urb->complete == NULL) {
					murb = (struct oxu_murb *) last->urb;
					last->urb = murb->main;
					if (murb->last) {
						ehci_urb_done(oxu, last->urb);
						count++;
					}
					oxu_murb_free(oxu, murb);
				} else {
					ehci_urb_done(oxu, last->urb);
					count++;
				}
			}
			oxu_qtd_free(oxu, last);
			last = NULL;
		}

		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;

		/* hardware copies qtd out of qh overlay */
		rmb();
		token = le32_to_cpu(qtd->hw_token);

		/* always clean up qtds the hc de-activated */
		if ((token & QTD_STS_ACTIVE) == 0) {
			if ((token & QTD_STS_HALT) != 0) {
				stopped = 1;

			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 */
			} else if (IS_SHORT_READ(token) &&
					!(qtd->hw_alt_next & EHCI_LIST_END)) {
				stopped = 1;
				goto halt;
			}

		/* stop scanning when we reach qtds the hc is using */
		} else if (likely(!stopped &&
				HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
			break;

		} else {
			stopped = 1;

			if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
				urb->status = -ESHUTDOWN;

			/* ignore active urbs unless some previous qtd
			 * for the urb faulted (including short read) or
			 * its urb was canceled.  we may patch qh or qtds.
			 */
			if (likely(urb->status == -EINPROGRESS))
				continue;

			/* issue status after short control reads */
			if (unlikely(do_status != 0)
					&& QTD_PID(token) == 0 /* OUT */) {
				do_status = 0;
				continue;
			}

			/* token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_le32(qtd->qtd_dma)
						== qh->hw_current)
				token = le32_to_cpu(qh->hw_token);

			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((HALT_BIT & qh->hw_token) == 0) {
halt:
				qh->hw_token |= HALT_BIT;
				wmb();
			}
		}

		/* Remove it from the queue */
		qtd_copy_status(oxu, urb->complete ?
					urb : ((struct oxu_murb *) urb)->main,
				qtd->length, token);
		if ((usb_pipein(qtd->urb->pipe)) &&
				(NULL != qtd->transfer_buffer))
			memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
		do_status = (urb->status == -EREMOTEIO)
				&& usb_pipecontrol(urb->pipe);

		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry(qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}
		list_del(&qtd->qtd_list);
		last = qtd;
	}

	/* last urb's completion might still need calling */
	if (likely(last != NULL)) {
		if (last->urb->complete == NULL) {
			murb = (struct oxu_murb *) last->urb;
			last->urb = murb->main;
			if (murb->last) {
				ehci_urb_done(oxu, last->urb);
				count++;
			}
			oxu_murb_free(oxu, murb);
		} else {
			ehci_urb_done(oxu, last->urb);
			count++;
		}
		oxu_qtd_free(oxu, last);
	}

	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;

	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(oxu, qh);
			break;
		case QH_STATE_LINKED:
			/* should be rare for periodic transfers,
			 * except maybe high bandwidth ...
			 */
			if ((cpu_to_le32(QH_SMASK)
					& qh->hw_info2) != 0) {
				intr_deschedule(oxu, qh);
				(void) qh_schedule(oxu, qh);
			} else
				unlink_async(oxu, qh);
			break;
		/* otherwise, unlink already started */
		}
	}

	return count;
}
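
/*
 * Note on the complete == NULL tests above (explanatory, not from the
 * original sources): URBs too large for the on-chip buffers are split by this
 * driver into struct oxu_murb fragments that carry no completion handler;
 * each fragment points back at the caller's URB via murb->main, and only the
 * fragment flagged murb->last triggers ehci_urb_done() for that URB.
 */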
/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
#define hb_mult(wMaxPacketSize)		(1 + (((wMaxPacketSize) >> 11) & 0x03))

/* ... and packet size, for any kind of endpoint descriptor */
#define max_packet(wMaxPacketSize)	((wMaxPacketSize) & 0x07ff)
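
/*
 * Decoding example (explanatory, not from the original sources): a high-speed
 * high-bandwidth endpoint advertising wMaxPacketSize 0x1400 yields
 * max_packet() = 0x400 = 1024 bytes and hb_mult() = 3 transactions per
 * microframe.
 */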
/* Reverse of qh_urb_transaction: free a list of TDs.
 * used for cleanup after errors, before HC sees an URB's TDs.
 */
static void qtd_list_free(struct oxu_hcd *oxu,
				struct urb *urb, struct list_head *qtd_list)
{
	struct list_head *entry, *temp;

	list_for_each_safe(entry, temp, qtd_list) {
		struct ehci_qtd *qtd;

		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
		list_del(&qtd->qtd_list);
		oxu_qtd_free(oxu, qtd);
	}
}
/* Create a list of filled qtds for this URB; won't link into qh.
 */
static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
						struct urb *urb,
						struct list_head *head,
						gfp_t flags)
{
	struct ehci_qtd *qtd, *qtd_prev;
	dma_addr_t buf;
	int len, maxpacket;
	int is_input;
	u32 token;
	void *transfer_buf = NULL;
	int ret;

	/*
	 * URBs map to sequences of QTDs: one logical transaction
	 */
	qtd = ehci_qtd_alloc(oxu);
	if (unlikely(!qtd))
		return NULL;
	list_add_tail(&qtd->qtd_list, head);
	qtd->urb = urb;

	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */

	len = urb->transfer_buffer_length;
	is_input = usb_pipein(urb->pipe);
	if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
		urb->transfer_buffer = phys_to_virt(urb->transfer_dma);

	if (usb_pipecontrol(urb->pipe)) {
		/* SETUP pid */
		ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
		if (ret)
			goto cleanup;

		qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);
		memcpy(qtd->buffer, qtd->urb->setup_packet,
				sizeof(struct usb_ctrlrequest));

		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);

		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}

	/*
	 * Data transfer stage: buffer setup
	 */

	ret = oxu_buf_alloc(oxu, qtd, len);
	if (ret)
		goto cleanup;

	buf = qtd->buffer_dma;
	transfer_buf = urb->transfer_buffer;

	if (!is_input)
		memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);

	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */

	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));

	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;

		this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
		qtd->transfer_buffer = transfer_buf;
		len -= this_qtd_len;
		buf += this_qtd_len;
		transfer_buf += this_qtd_len;
		if (is_input)
			qtd->hw_alt_next = oxu->async->hw_alt_next;

		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;

		if (likely(len <= 0))
			break;

		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		if (likely(len > 0)) {
			ret = oxu_buf_alloc(oxu, qtd, len);
			if (ret)
				goto cleanup;
		}
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);
	}

	/* unless the bulk/interrupt caller wants a chance to clean
	 * up after short reads, hc should advance qh past this urb
	 */
	if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol(urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END;

	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely(urb->transfer_buffer_length != 0)) {
		int one_more = 0;

		if (usb_pipecontrol(urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out" */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc(oxu);
			if (unlikely(!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
			list_add_tail(&qtd->qtd_list, head);

			/* never any data in such packets */
			qtd_fill(qtd, 0, 0, token, 0);
		}
	}

	/* by default, enable interrupt on urb completion */
	qtd->hw_token |= cpu_to_le32(QTD_IOC);
	return head;

cleanup:
	qtd_list_free(oxu, urb, head);
	return NULL;
}
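
/*
 * Data-flow note (explanatory, not from the original sources): because every
 * qtd points at an on-chip buffer rather than at the URB's own memory, OUT
 * data is memcpy'd into qtd->buffer above at submit time, while IN data is
 * copied back from qtd->buffer into qtd->transfer_buffer in qh_completions()
 * once the hardware retires the qtd.
 */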
  1127. /* Each QH holds a qtd list; a QH is used for everything except iso.
  1128. *
  1129. * For interrupt urbs, the scheduler must set the microframe scheduling
  1130. * mask(s) each time the QH gets scheduled. For highspeed, that's
  1131. * just one microframe in the s-mask. For split interrupt transactions
  1132. * there are additional complications: c-mask, maybe FSTNs.
  1133. */
  1134. static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
  1135. struct urb *urb, gfp_t flags)
  1136. {
  1137. struct ehci_qh *qh = oxu_qh_alloc(oxu);
  1138. u32 info1 = 0, info2 = 0;
  1139. int is_input, type;
  1140. int maxp = 0;
  1141. if (!qh)
  1142. return qh;
  1143. /*
  1144. * init endpoint/device data for this QH
  1145. */
  1146. info1 |= usb_pipeendpoint(urb->pipe) << 8;
  1147. info1 |= usb_pipedevice(urb->pipe) << 0;
  1148. is_input = usb_pipein(urb->pipe);
  1149. type = usb_pipetype(urb->pipe);
  1150. maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
  1151. /* Compute interrupt scheduling parameters just once, and save.
  1152. * - allowing for high bandwidth, how many nsec/uframe are used?
  1153. * - split transactions need a second CSPLIT uframe; same question
  1154. * - splits also need a schedule gap (for full/low speed I/O)
  1155. * - qh has a polling interval
  1156. *
  1157. * For control/bulk requests, the HC or TT handles these.
  1158. */
  1159. if (type == PIPE_INTERRUPT) {
  1160. qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
  1161. is_input, 0,
  1162. hb_mult(maxp) * max_packet(maxp)));
  1163. qh->start = NO_FRAME;
  1164. if (urb->dev->speed == USB_SPEED_HIGH) {
  1165. qh->c_usecs = 0;
  1166. qh->gap_uf = 0;
  1167. qh->period = urb->interval >> 3;
  1168. if (qh->period == 0 && urb->interval != 1) {
  1169. /* NOTE interval 2 or 4 uframes could work.
  1170. * But interval 1 scheduling is simpler, and
  1171. * includes high bandwidth.
  1172. */
  1173. oxu_dbg(oxu, "intr period %d uframes, NYET!\n",
  1174. urb->interval);
  1175. goto done;
  1176. }
  1177. } else {
  1178. struct usb_tt *tt = urb->dev->tt;
  1179. int think_time;
  1180. /* gap is f(FS/LS transfer times) */
  1181. qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
  1182. is_input, 0, maxp) / (125 * 1000);
  1183. /* FIXME this just approximates SPLIT/CSPLIT times */
  1184. if (is_input) { /* SPLIT, gap, CSPLIT+DATA */
  1185. qh->c_usecs = qh->usecs + HS_USECS(0);
  1186. qh->usecs = HS_USECS(1);
  1187. } else { /* SPLIT+DATA, gap, CSPLIT */
  1188. qh->usecs += HS_USECS(1);
  1189. qh->c_usecs = HS_USECS(0);
  1190. }
  1191. think_time = tt ? tt->think_time : 0;
  1192. qh->tt_usecs = NS_TO_US(think_time +
  1193. usb_calc_bus_time(urb->dev->speed,
  1194. is_input, 0, max_packet(maxp)));
  1195. qh->period = urb->interval;
  1196. }
  1197. }
  1198. /* support for tt scheduling, and access to toggles */
  1199. qh->dev = urb->dev;
  1200. /* using TT? */
  1201. switch (urb->dev->speed) {
  1202. case USB_SPEED_LOW:
  1203. info1 |= (1 << 12); /* EPS "low" */
  1204. /* FALL THROUGH */
  1205. case USB_SPEED_FULL:
  1206. /* EPS 0 means "full" */
  1207. if (type != PIPE_INTERRUPT)
  1208. info1 |= (EHCI_TUNE_RL_TT << 28);
  1209. if (type == PIPE_CONTROL) {
  1210. info1 |= (1 << 27); /* for TT */
  1211. info1 |= 1 << 14; /* toggle from qtd */
  1212. }
  1213. info1 |= maxp << 16;
  1214. info2 |= (EHCI_TUNE_MULT_TT << 30);
  1215. info2 |= urb->dev->ttport << 23;
  1216. /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
  1217. break;
  1218. case USB_SPEED_HIGH: /* no TT involved */
  1219. info1 |= (2 << 12); /* EPS "high" */
  1220. if (type == PIPE_CONTROL) {
  1221. info1 |= (EHCI_TUNE_RL_HS << 28);
  1222. info1 |= 64 << 16; /* usb2 fixed maxpacket */
  1223. info1 |= 1 << 14; /* toggle from qtd */
  1224. info2 |= (EHCI_TUNE_MULT_HS << 30);
  1225. } else if (type == PIPE_BULK) {
  1226. info1 |= (EHCI_TUNE_RL_HS << 28);
  1227. info1 |= 512 << 16; /* usb2 fixed maxpacket */
  1228. info2 |= (EHCI_TUNE_MULT_HS << 30);
  1229. } else { /* PIPE_INTERRUPT */
  1230. info1 |= max_packet(maxp) << 16;
  1231. info2 |= hb_mult(maxp) << 30;
  1232. }
  1233. break;
  1234. default:
  1235. oxu_dbg(oxu, "bogus dev %p speed %d\n", urb->dev, urb->dev->speed);
  1236. done:
  1237. qh_put(qh);
  1238. return NULL;
  1239. }
  1240. /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
  1241. /* init as live, toggle clear, advance to dummy */
  1242. qh->qh_state = QH_STATE_IDLE;
  1243. qh->hw_info1 = cpu_to_le32(info1);
  1244. qh->hw_info2 = cpu_to_le32(info2);
  1245. usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
  1246. qh_refresh(oxu, qh);
  1247. return qh;
  1248. }
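/*
 * Worked example (values assumed for illustration): a high speed bulk
 * endpoint 2 IN on device address 3 would be described roughly as
 *
 *   info1 = 3                       dev address, bits 6:0
 *         | (2 << 8)                endpoint number
 *         | (2 << 12)               EPS "high"
 *         | (512 << 16)             usb2 fixed bulk maxpacket
 *         | (EHCI_TUNE_RL_HS << 28)
 *   info2 = EHCI_TUNE_MULT_HS << 30
 *
 * matching the PIPE_BULK branch above.  Full/low speed endpoints instead
 * pick up the TT hub port field (and, for control, the per-qtd toggle
 * bit) so the controller can issue SPLIT transactions on their behalf.
 */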
  1249. /* Move qh (and its qtds) onto async queue; maybe enable queue.
  1250. */
  1251. static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
  1252. {
  1253. __le32 dma = QH_NEXT(qh->qh_dma);
  1254. struct ehci_qh *head;
  1255. /* (re)start the async schedule? */
  1256. head = oxu->async;
  1257. timer_action_done(oxu, TIMER_ASYNC_OFF);
  1258. if (!head->qh_next.qh) {
  1259. u32 cmd = readl(&oxu->regs->command);
  1260. if (!(cmd & CMD_ASE)) {
  1261. /* in case a clear of CMD_ASE didn't take yet */
  1262. (void)handshake(oxu, &oxu->regs->status,
  1263. STS_ASS, 0, 150);
  1264. cmd |= CMD_ASE | CMD_RUN;
  1265. writel(cmd, &oxu->regs->command);
  1266. oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
  1267. /* posted write need not be known to HC yet ... */
  1268. }
  1269. }
  1270. /* clear halt and/or toggle; and maybe recover from silicon quirk */
  1271. if (qh->qh_state == QH_STATE_IDLE)
  1272. qh_refresh(oxu, qh);
  1273. /* splice right after start */
  1274. qh->qh_next = head->qh_next;
  1275. qh->hw_next = head->hw_next;
  1276. wmb();
  1277. head->qh_next.qh = qh;
  1278. head->hw_next = dma;
  1279. qh->qh_state = QH_STATE_LINKED;
  1280. /* qtd completions reported later by interrupt */
  1281. }
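/*
 * Ordering note: the new qh's own qh_next/hw_next are filled in first, and
 * only after the wmb() is the qh made reachable from the async head.  The
 * controller follows hw_next on its own, so it must never be able to fetch
 * a qh whose forward links are still stale.
 */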
  1282. #define QH_ADDR_MASK cpu_to_le32(0x7f)
  1283. /*
  1284. * For control/bulk/interrupt, return QH with these TDs appended.
  1285. * Allocates and initializes the QH if necessary.
  1286. * Returns null if it can't allocate a QH it needs to.
  1287. * If the QH has TDs (urbs) already, that's great.
  1288. */
  1289. static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
  1290. struct urb *urb, struct list_head *qtd_list,
  1291. int epnum, void **ptr)
  1292. {
  1293. struct ehci_qh *qh = NULL;
  1294. qh = (struct ehci_qh *) *ptr;
  1295. if (unlikely(qh == NULL)) {
  1296. /* can't sleep here, we have oxu->lock... */
  1297. qh = qh_make(oxu, urb, GFP_ATOMIC);
  1298. *ptr = qh;
  1299. }
  1300. if (likely(qh != NULL)) {
  1301. struct ehci_qtd *qtd;
  1302. if (unlikely(list_empty(qtd_list)))
  1303. qtd = NULL;
  1304. else
  1305. qtd = list_entry(qtd_list->next, struct ehci_qtd,
  1306. qtd_list);
  1307. /* control qh may need patching ... */
  1308. if (unlikely(epnum == 0)) {
  1309. /* usb_reset_device() briefly reverts to address 0 */
  1310. if (usb_pipedevice(urb->pipe) == 0)
  1311. qh->hw_info1 &= ~QH_ADDR_MASK;
  1312. }
  1313. /* just one way to queue requests: swap with the dummy qtd.
  1314. * only hc or qh_refresh() ever modify the overlay.
  1315. */
  1316. if (likely(qtd != NULL)) {
  1317. struct ehci_qtd *dummy;
  1318. dma_addr_t dma;
  1319. __le32 token;
  1320. /* to avoid racing the HC, use the dummy td instead of
  1321. * the first td of our list (becomes new dummy). both
  1322. * tds stay deactivated until we're done, when the
  1323. * HC is allowed to fetch the old dummy (4.10.2).
  1324. */
  1325. token = qtd->hw_token;
  1326. qtd->hw_token = HALT_BIT;
  1327. wmb();
  1328. dummy = qh->dummy;
  1329. dma = dummy->qtd_dma;
  1330. *dummy = *qtd;
  1331. dummy->qtd_dma = dma;
  1332. list_del(&qtd->qtd_list);
  1333. list_add(&dummy->qtd_list, qtd_list);
  1334. list_splice(qtd_list, qh->qtd_list.prev);
  1335. ehci_qtd_init(qtd, qtd->qtd_dma);
  1336. qh->dummy = qtd;
  1337. /* hc must see the new dummy at list end */
  1338. dma = qtd->qtd_dma;
  1339. qtd = list_entry(qh->qtd_list.prev,
  1340. struct ehci_qtd, qtd_list);
  1341. qtd->hw_next = QTD_NEXT(dma);
  1342. /* let the hc process these next qtds */
  1343. dummy->hw_token = (token & ~(0x80));
  1344. wmb();
  1345. dummy->hw_token = token;
  1346. urb->hcpriv = qh_get(qh);
  1347. }
  1348. }
  1349. return qh;
  1350. }
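/*
 * Sketch of the dummy swap used above (illustration only):
 *
 *   before:  qh -> ... -> dummy (inactive)      caller: qtd1 -> qtd2
 *   after:   qh -> ... -> qtd1* -> qtd2 -> new dummy (inactive)
 *
 * The old dummy's memory is overwritten with qtd1's contents (keeping the
 * dummy's own DMA address), the caller's first qtd is reinitialized as the
 * new dummy, and only then is the saved token written back to make the
 * chain active.  The controller therefore never sees a half-built list,
 * per EHCI spec 4.10.2.
 */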
  1351. static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
  1352. struct list_head *qtd_list, gfp_t mem_flags)
  1353. {
  1354. struct ehci_qtd *qtd;
  1355. int epnum;
  1356. unsigned long flags;
  1357. struct ehci_qh *qh = NULL;
  1358. int rc = 0;
  1359. qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
  1360. epnum = urb->ep->desc.bEndpointAddress;
  1361. #ifdef OXU_URB_TRACE
  1362. oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
  1363. __func__, urb->dev->devpath, urb,
  1364. epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
  1365. urb->transfer_buffer_length,
  1366. qtd, urb->ep->hcpriv);
  1367. #endif
  1368. spin_lock_irqsave(&oxu->lock, flags);
  1369. if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
  1370. rc = -ESHUTDOWN;
  1371. goto done;
  1372. }
  1373. qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
  1374. if (unlikely(qh == NULL)) {
  1375. rc = -ENOMEM;
  1376. goto done;
  1377. }
  1378. /* Control/bulk operations through TTs don't need scheduling,
  1379. * the HC and TT handle it when the TT has a buffer ready.
  1380. */
  1381. if (likely(qh->qh_state == QH_STATE_IDLE))
  1382. qh_link_async(oxu, qh_get(qh));
  1383. done:
  1384. spin_unlock_irqrestore(&oxu->lock, flags);
  1385. if (unlikely(qh == NULL))
  1386. qtd_list_free(oxu, urb, qtd_list);
  1387. return rc;
  1388. }
1389. /* The async qh for the qtds being reclaimed is now unlinked from the HC */
  1390. static void end_unlink_async(struct oxu_hcd *oxu)
  1391. {
  1392. struct ehci_qh *qh = oxu->reclaim;
  1393. struct ehci_qh *next;
  1394. timer_action_done(oxu, TIMER_IAA_WATCHDOG);
  1395. qh->qh_state = QH_STATE_IDLE;
  1396. qh->qh_next.qh = NULL;
  1397. qh_put(qh); /* refcount from reclaim */
  1398. /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
  1399. next = qh->reclaim;
  1400. oxu->reclaim = next;
  1401. oxu->reclaim_ready = 0;
  1402. qh->reclaim = NULL;
  1403. qh_completions(oxu, qh);
  1404. if (!list_empty(&qh->qtd_list)
  1405. && HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
  1406. qh_link_async(oxu, qh);
  1407. else {
  1408. qh_put(qh); /* refcount from async list */
  1409. /* it's not free to turn the async schedule on/off; leave it
  1410. * active but idle for a while once it empties.
  1411. */
  1412. if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
  1413. && oxu->async->qh_next.qh == NULL)
  1414. timer_action(oxu, TIMER_ASYNC_OFF);
  1415. }
  1416. if (next) {
  1417. oxu->reclaim = NULL;
  1418. start_unlink_async(oxu, next);
  1419. }
  1420. }
  1421. /* makes sure the async qh will become idle */
  1422. /* caller must own oxu->lock */
  1423. static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
  1424. {
  1425. int cmd = readl(&oxu->regs->command);
  1426. struct ehci_qh *prev;
  1427. #ifdef DEBUG
  1428. assert_spin_locked(&oxu->lock);
  1429. if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
  1430. && qh->qh_state != QH_STATE_UNLINK_WAIT))
  1431. BUG();
  1432. #endif
  1433. /* stop async schedule right now? */
  1434. if (unlikely(qh == oxu->async)) {
  1435. /* can't get here without STS_ASS set */
  1436. if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
  1437. && !oxu->reclaim) {
  1438. /* ... and CMD_IAAD clear */
  1439. writel(cmd & ~CMD_ASE, &oxu->regs->command);
  1440. wmb();
  1441. /* handshake later, if we need to */
  1442. timer_action_done(oxu, TIMER_ASYNC_OFF);
  1443. }
  1444. return;
  1445. }
  1446. qh->qh_state = QH_STATE_UNLINK;
  1447. oxu->reclaim = qh = qh_get(qh);
  1448. prev = oxu->async;
  1449. while (prev->qh_next.qh != qh)
  1450. prev = prev->qh_next.qh;
  1451. prev->hw_next = qh->hw_next;
  1452. prev->qh_next = qh->qh_next;
  1453. wmb();
  1454. if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
  1455. /* if (unlikely(qh->reclaim != 0))
  1456. * this will recurse, probably not much
  1457. */
  1458. end_unlink_async(oxu);
  1459. return;
  1460. }
  1461. oxu->reclaim_ready = 0;
  1462. cmd |= CMD_IAAD;
  1463. writel(cmd, &oxu->regs->command);
  1464. (void) readl(&oxu->regs->command);
  1465. timer_action(oxu, TIMER_IAA_WATCHDOG);
  1466. }
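/*
 * Unlink handshake in brief: once the qh is spliced out of the software
 * and hardware lists above, CMD_IAAD asks the controller to raise an
 * "Interrupt on Async Advance" after it can no longer be caching a
 * pointer to the removed qh.  The IRQ handler sets reclaim_ready when
 * STS_IAA arrives and end_unlink_async() finishes the job; the watchdog
 * timer covers controllers that lose the IAA interrupt.
 */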
  1467. static void scan_async(struct oxu_hcd *oxu)
  1468. {
  1469. struct ehci_qh *qh;
  1470. enum ehci_timer_action action = TIMER_IO_WATCHDOG;
  1471. if (!++(oxu->stamp))
  1472. oxu->stamp++;
  1473. timer_action_done(oxu, TIMER_ASYNC_SHRINK);
  1474. rescan:
  1475. qh = oxu->async->qh_next.qh;
  1476. if (likely(qh != NULL)) {
  1477. do {
  1478. /* clean any finished work for this qh */
  1479. if (!list_empty(&qh->qtd_list)
  1480. && qh->stamp != oxu->stamp) {
  1481. int temp;
  1482. /* unlinks could happen here; completion
  1483. * reporting drops the lock. rescan using
  1484. * the latest schedule, but don't rescan
  1485. * qhs we already finished (no looping).
  1486. */
  1487. qh = qh_get(qh);
  1488. qh->stamp = oxu->stamp;
  1489. temp = qh_completions(oxu, qh);
  1490. qh_put(qh);
  1491. if (temp != 0)
  1492. goto rescan;
  1493. }
  1494. /* unlink idle entries, reducing HC PCI usage as well
1495. * as HCD schedule-scanning costs.  Delay unlinking any qh
1496. * we just scanned, since it quite often does not stay
1497. * idle for long.
  1498. * (plus, avoids some kind of re-activation race.)
  1499. */
  1500. if (list_empty(&qh->qtd_list)) {
  1501. if (qh->stamp == oxu->stamp)
  1502. action = TIMER_ASYNC_SHRINK;
  1503. else if (!oxu->reclaim
  1504. && qh->qh_state == QH_STATE_LINKED)
  1505. start_unlink_async(oxu, qh);
  1506. }
  1507. qh = qh->qh_next.qh;
  1508. } while (qh);
  1509. }
  1510. if (action == TIMER_ASYNC_SHRINK)
  1511. timer_action(oxu, TIMER_ASYNC_SHRINK);
  1512. }
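/*
 * The stamp trick: each scan pass bumps oxu->stamp and tags every qh it
 * processes, so when qh_completions() drops oxu->lock and the schedule has
 * to be rescanned from the top, already-visited qhs are skipped and the
 * loop cannot spin forever over the same entries.
 */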
  1513. /*
  1514. * periodic_next_shadow - return "next" pointer on shadow list
  1515. * @periodic: host pointer to qh/itd/sitd
  1516. * @tag: hardware tag for type of this record
  1517. */
  1518. static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
  1519. __le32 tag)
  1520. {
  1521. switch (tag) {
  1522. default:
  1523. case Q_TYPE_QH:
  1524. return &periodic->qh->qh_next;
  1525. }
  1526. }
  1527. /* caller must hold oxu->lock */
  1528. static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
  1529. {
  1530. union ehci_shadow *prev_p = &oxu->pshadow[frame];
  1531. __le32 *hw_p = &oxu->periodic[frame];
  1532. union ehci_shadow here = *prev_p;
  1533. /* find predecessor of "ptr"; hw and shadow lists are in sync */
  1534. while (here.ptr && here.ptr != ptr) {
  1535. prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
  1536. hw_p = here.hw_next;
  1537. here = *prev_p;
  1538. }
  1539. /* an interrupt entry (at list end) could have been shared */
  1540. if (!here.ptr)
  1541. return;
  1542. /* update shadow and hardware lists ... the old "next" pointers
  1543. * from ptr may still be in use, the caller updates them.
  1544. */
  1545. *prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
  1546. *hw_p = *here.hw_next;
  1547. }
  1548. /* how many of the uframe's 125 usecs are allocated? */
  1549. static unsigned short periodic_usecs(struct oxu_hcd *oxu,
  1550. unsigned frame, unsigned uframe)
  1551. {
  1552. __le32 *hw_p = &oxu->periodic[frame];
  1553. union ehci_shadow *q = &oxu->pshadow[frame];
  1554. unsigned usecs = 0;
  1555. while (q->ptr) {
  1556. switch (Q_NEXT_TYPE(*hw_p)) {
  1557. case Q_TYPE_QH:
  1558. default:
  1559. /* is it in the S-mask? */
  1560. if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
  1561. usecs += q->qh->usecs;
  1562. /* ... or C-mask? */
  1563. if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
  1564. usecs += q->qh->c_usecs;
  1565. hw_p = &q->qh->hw_next;
  1566. q = &q->qh->qh_next;
  1567. break;
  1568. }
  1569. }
  1570. #ifdef DEBUG
  1571. if (usecs > 100)
  1572. oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
  1573. frame * 8 + uframe, usecs);
  1574. #endif
  1575. return usecs;
  1576. }
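/*
 * Bandwidth budget (illustrative): of each 125 usec microframe only about
 * 100 usec (the usual 80% rule) may be claimed by periodic transfers,
 * which is why the DEBUG check above flags totals past 100 usec.
 * check_period() below applies the same limit in reverse, converting
 * "usecs we need" into the most that may already be claimed.
 */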
  1577. static int enable_periodic(struct oxu_hcd *oxu)
  1578. {
  1579. u32 cmd;
  1580. int status;
1581. /* did clearing PSE take effect yet?
  1582. * takes effect only at frame boundaries...
  1583. */
  1584. status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
  1585. if (status != 0) {
  1586. oxu_to_hcd(oxu)->state = HC_STATE_HALT;
  1587. usb_hc_died(oxu_to_hcd(oxu));
  1588. return status;
  1589. }
  1590. cmd = readl(&oxu->regs->command) | CMD_PSE;
  1591. writel(cmd, &oxu->regs->command);
  1592. /* posted write ... PSS happens later */
  1593. oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
  1594. /* make sure ehci_work scans these */
  1595. oxu->next_uframe = readl(&oxu->regs->frame_index)
  1596. % (oxu->periodic_size << 3);
  1597. return 0;
  1598. }
  1599. static int disable_periodic(struct oxu_hcd *oxu)
  1600. {
  1601. u32 cmd;
  1602. int status;
1603. /* wait for a previous set of PSE to take effect; it
  1604. * takes effect only at frame boundaries...
  1605. */
  1606. status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
  1607. if (status != 0) {
  1608. oxu_to_hcd(oxu)->state = HC_STATE_HALT;
  1609. usb_hc_died(oxu_to_hcd(oxu));
  1610. return status;
  1611. }
  1612. cmd = readl(&oxu->regs->command) & ~CMD_PSE;
  1613. writel(cmd, &oxu->regs->command);
  1614. /* posted write ... */
  1615. oxu->next_uframe = -1;
  1616. return 0;
  1617. }
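/*
 * PSE/PSS pairing: CMD_PSE requests the schedule state and STS_PSS reports
 * what the controller is actually doing; the two only converge at frame
 * boundaries.  Both helpers above therefore handshake on STS_PSS (for up
 * to 9 * 125 usec, a little over one frame) before flipping the command
 * bit the other way.
 */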
  1618. /* periodic schedule slots have iso tds (normal or split) first, then a
  1619. * sparse tree for active interrupt transfers.
  1620. *
  1621. * this just links in a qh; caller guarantees uframe masks are set right.
1622. * no FSTN support (yet; EHCI 0.96+)
  1623. */
  1624. static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
  1625. {
  1626. unsigned i;
  1627. unsigned period = qh->period;
  1628. dev_dbg(&qh->dev->dev,
  1629. "link qh%d-%04x/%p start %d [%d/%d us]\n",
  1630. period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
  1631. qh, qh->start, qh->usecs, qh->c_usecs);
  1632. /* high bandwidth, or otherwise every microframe */
  1633. if (period == 0)
  1634. period = 1;
  1635. for (i = qh->start; i < oxu->periodic_size; i += period) {
  1636. union ehci_shadow *prev = &oxu->pshadow[i];
  1637. __le32 *hw_p = &oxu->periodic[i];
  1638. union ehci_shadow here = *prev;
  1639. __le32 type = 0;
  1640. /* skip the iso nodes at list head */
  1641. while (here.ptr) {
  1642. type = Q_NEXT_TYPE(*hw_p);
  1643. if (type == Q_TYPE_QH)
  1644. break;
  1645. prev = periodic_next_shadow(prev, type);
  1646. hw_p = &here.qh->hw_next;
  1647. here = *prev;
  1648. }
  1649. /* sorting each branch by period (slow-->fast)
  1650. * enables sharing interior tree nodes
  1651. */
  1652. while (here.ptr && qh != here.qh) {
  1653. if (qh->period > here.qh->period)
  1654. break;
  1655. prev = &here.qh->qh_next;
  1656. hw_p = &here.qh->hw_next;
  1657. here = *prev;
  1658. }
  1659. /* link in this qh, unless some earlier pass did that */
  1660. if (qh != here.qh) {
  1661. qh->qh_next = here;
  1662. if (here.qh)
  1663. qh->hw_next = *hw_p;
  1664. wmb();
  1665. prev->qh = qh;
  1666. *hw_p = QH_NEXT(qh->qh_dma);
  1667. }
  1668. }
  1669. qh->qh_state = QH_STATE_LINKED;
  1670. qh_get(qh);
  1671. /* update per-qh bandwidth for usbfs */
  1672. oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
  1673. ? ((qh->usecs + qh->c_usecs) / qh->period)
  1674. : (qh->usecs * 8);
  1675. /* maybe enable periodic schedule processing */
  1676. if (!oxu->periodic_sched++)
  1677. return enable_periodic(oxu);
  1678. return 0;
  1679. }
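/*
 * Shape of the periodic tree (periods assumed for illustration): each
 * branch is kept sorted slow-to-fast, so a period-4 qh is linked into
 * slots start, start+4, start+8, ... while a period-1 qh hangs off the
 * tail of every slot's chain.  Sorting this way lets many slots converge
 * onto the same fast qh instead of duplicating interior nodes.
 */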
  1680. static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
  1681. {
  1682. unsigned i;
  1683. unsigned period;
  1684. /* FIXME:
  1685. * IF this isn't high speed
  1686. * and this qh is active in the current uframe
  1687. * (and overlay token SplitXstate is false?)
  1688. * THEN
  1689. * qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
  1690. */
  1691. /* high bandwidth, or otherwise part of every microframe */
  1692. period = qh->period;
  1693. if (period == 0)
  1694. period = 1;
  1695. for (i = qh->start; i < oxu->periodic_size; i += period)
  1696. periodic_unlink(oxu, i, qh);
  1697. /* update per-qh bandwidth for usbfs */
  1698. oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
  1699. ? ((qh->usecs + qh->c_usecs) / qh->period)
  1700. : (qh->usecs * 8);
  1701. dev_dbg(&qh->dev->dev,
  1702. "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
  1703. qh->period,
  1704. le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
  1705. qh, qh->start, qh->usecs, qh->c_usecs);
  1706. /* qh->qh_next still "live" to HC */
  1707. qh->qh_state = QH_STATE_UNLINK;
  1708. qh->qh_next.ptr = NULL;
  1709. qh_put(qh);
  1710. /* maybe turn off periodic schedule */
  1711. oxu->periodic_sched--;
  1712. if (!oxu->periodic_sched)
  1713. (void) disable_periodic(oxu);
  1714. }
  1715. static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
  1716. {
  1717. unsigned wait;
  1718. qh_unlink_periodic(oxu, qh);
  1719. /* simple/paranoid: always delay, expecting the HC needs to read
  1720. * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
  1721. * expect hub_wq to clean up after any CSPLITs we won't issue.
  1722. * active high speed queues may need bigger delays...
  1723. */
  1724. if (list_empty(&qh->qtd_list)
  1725. || (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0)
  1726. wait = 2;
  1727. else
  1728. wait = 55; /* worst case: 3 * 1024 */
  1729. udelay(wait);
  1730. qh->qh_state = QH_STATE_IDLE;
  1731. qh->hw_next = EHCI_LIST_END;
  1732. wmb();
  1733. }
  1734. static int check_period(struct oxu_hcd *oxu,
  1735. unsigned frame, unsigned uframe,
  1736. unsigned period, unsigned usecs)
  1737. {
  1738. int claimed;
  1739. /* complete split running into next frame?
  1740. * given FSTN support, we could sometimes check...
  1741. */
  1742. if (uframe >= 8)
  1743. return 0;
  1744. /*
  1745. * 80% periodic == 100 usec/uframe available
  1746. * convert "usecs we need" to "max already claimed"
  1747. */
  1748. usecs = 100 - usecs;
  1749. /* we "know" 2 and 4 uframe intervals were rejected; so
  1750. * for period 0, check _every_ microframe in the schedule.
  1751. */
  1752. if (unlikely(period == 0)) {
  1753. do {
  1754. for (uframe = 0; uframe < 7; uframe++) {
  1755. claimed = periodic_usecs(oxu, frame, uframe);
  1756. if (claimed > usecs)
  1757. return 0;
  1758. }
  1759. } while ((frame += 1) < oxu->periodic_size);
  1760. /* just check the specified uframe, at that period */
  1761. } else {
  1762. do {
  1763. claimed = periodic_usecs(oxu, frame, uframe);
  1764. if (claimed > usecs)
  1765. return 0;
  1766. } while ((frame += period) < oxu->periodic_size);
  1767. }
  1768. return 1;
  1769. }
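/*
 * Worked example (numbers assumed): to place a transfer that needs 30 usec
 * per visit, the code computes usecs = 100 - 30 = 70 and then walks the
 * candidate slots; any slot already carrying more than 70 usec of claimed
 * periodic time is rejected, keeping every microframe under the roughly
 * 100 usec budget.
 */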
  1770. static int check_intr_schedule(struct oxu_hcd *oxu,
  1771. unsigned frame, unsigned uframe,
  1772. const struct ehci_qh *qh, __le32 *c_maskp)
  1773. {
  1774. int retval = -ENOSPC;
  1775. if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
  1776. goto done;
  1777. if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
  1778. goto done;
  1779. if (!qh->c_usecs) {
  1780. retval = 0;
  1781. *c_maskp = 0;
  1782. goto done;
  1783. }
  1784. done:
  1785. return retval;
  1786. }
  1787. /* "first fit" scheduling policy used the first time through,
  1788. * or when the previous schedule slot can't be re-used.
  1789. */
  1790. static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
  1791. {
  1792. int status;
  1793. unsigned uframe;
  1794. __le32 c_mask;
  1795. unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
  1796. qh_refresh(oxu, qh);
  1797. qh->hw_next = EHCI_LIST_END;
  1798. frame = qh->start;
  1799. /* reuse the previous schedule slots, if we can */
  1800. if (frame < qh->period) {
  1801. uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
  1802. status = check_intr_schedule(oxu, frame, --uframe,
  1803. qh, &c_mask);
  1804. } else {
  1805. uframe = 0;
  1806. c_mask = 0;
  1807. status = -ENOSPC;
  1808. }
  1809. /* else scan the schedule to find a group of slots such that all
  1810. * uframes have enough periodic bandwidth available.
  1811. */
  1812. if (status) {
  1813. /* "normal" case, uframing flexible except with splits */
  1814. if (qh->period) {
  1815. frame = qh->period - 1;
  1816. do {
  1817. for (uframe = 0; uframe < 8; uframe++) {
  1818. status = check_intr_schedule(oxu,
  1819. frame, uframe, qh,
  1820. &c_mask);
  1821. if (status == 0)
  1822. break;
  1823. }
  1824. } while (status && frame--);
  1825. /* qh->period == 0 means every uframe */
  1826. } else {
  1827. frame = 0;
  1828. status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
  1829. }
  1830. if (status)
  1831. goto done;
  1832. qh->start = frame;
  1833. /* reset S-frame and (maybe) C-frame masks */
  1834. qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK));
  1835. qh->hw_info2 |= qh->period
  1836. ? cpu_to_le32(1 << uframe)
  1837. : cpu_to_le32(QH_SMASK);
  1838. qh->hw_info2 |= c_mask;
  1839. } else
  1840. oxu_dbg(oxu, "reused qh %p schedule\n", qh);
  1841. /* stuff into the periodic schedule */
  1842. status = qh_link_periodic(oxu, qh);
  1843. done:
  1844. return status;
  1845. }
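/*
 * Scheduling recap: an interrupt qh first tries to reuse its previous slot
 * (qh->start still within the period); otherwise the loops above do a
 * first-fit scan from frame period-1 downwards across all eight
 * microframes, then write the S-mask into hw_info2 before linking the qh
 * into the periodic tree.  Note that check_intr_schedule() as written only
 * succeeds when no CSPLIT budget (c_usecs) is required, so split interrupt
 * endpoints do not get a C-mask here.
 */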
  1846. static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
  1847. struct list_head *qtd_list, gfp_t mem_flags)
  1848. {
  1849. unsigned epnum;
  1850. unsigned long flags;
  1851. struct ehci_qh *qh;
  1852. int status = 0;
  1853. struct list_head empty;
  1854. /* get endpoint and transfer/schedule data */
  1855. epnum = urb->ep->desc.bEndpointAddress;
  1856. spin_lock_irqsave(&oxu->lock, flags);
  1857. if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
  1858. status = -ESHUTDOWN;
  1859. goto done;
  1860. }
  1861. /* get qh and force any scheduling errors */
  1862. INIT_LIST_HEAD(&empty);
  1863. qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
  1864. if (qh == NULL) {
  1865. status = -ENOMEM;
  1866. goto done;
  1867. }
  1868. if (qh->qh_state == QH_STATE_IDLE) {
  1869. status = qh_schedule(oxu, qh);
  1870. if (status != 0)
  1871. goto done;
  1872. }
  1873. /* then queue the urb's tds to the qh */
  1874. qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
  1875. BUG_ON(qh == NULL);
  1876. /* ... update usbfs periodic stats */
  1877. oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;
  1878. done:
  1879. spin_unlock_irqrestore(&oxu->lock, flags);
  1880. if (status)
  1881. qtd_list_free(oxu, urb, qtd_list);
  1882. return status;
  1883. }
  1884. static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
  1885. gfp_t mem_flags)
  1886. {
  1887. oxu_dbg(oxu, "iso support is missing!\n");
  1888. return -ENOSYS;
  1889. }
  1890. static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
  1891. gfp_t mem_flags)
  1892. {
  1893. oxu_dbg(oxu, "split iso support is missing!\n");
  1894. return -ENOSYS;
  1895. }
  1896. static void scan_periodic(struct oxu_hcd *oxu)
  1897. {
  1898. unsigned frame, clock, now_uframe, mod;
  1899. unsigned modified;
  1900. mod = oxu->periodic_size << 3;
  1901. /*
  1902. * When running, scan from last scan point up to "now"
  1903. * else clean up by scanning everything that's left.
  1904. * Touches as few pages as possible: cache-friendly.
  1905. */
  1906. now_uframe = oxu->next_uframe;
  1907. if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
  1908. clock = readl(&oxu->regs->frame_index);
  1909. else
  1910. clock = now_uframe + mod - 1;
  1911. clock %= mod;
  1912. for (;;) {
  1913. union ehci_shadow q, *q_p;
  1914. __le32 type, *hw_p;
  1915. unsigned uframes;
  1916. /* don't scan past the live uframe */
  1917. frame = now_uframe >> 3;
  1918. if (frame == (clock >> 3))
  1919. uframes = now_uframe & 0x07;
  1920. else {
  1921. /* safe to scan the whole frame at once */
  1922. now_uframe |= 0x07;
  1923. uframes = 8;
  1924. }
  1925. restart:
  1926. /* scan each element in frame's queue for completions */
  1927. q_p = &oxu->pshadow[frame];
  1928. hw_p = &oxu->periodic[frame];
  1929. q.ptr = q_p->ptr;
  1930. type = Q_NEXT_TYPE(*hw_p);
  1931. modified = 0;
  1932. while (q.ptr != NULL) {
  1933. union ehci_shadow temp;
  1934. int live;
  1935. live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state);
  1936. switch (type) {
  1937. case Q_TYPE_QH:
  1938. /* handle any completions */
  1939. temp.qh = qh_get(q.qh);
  1940. type = Q_NEXT_TYPE(q.qh->hw_next);
  1941. q = q.qh->qh_next;
  1942. modified = qh_completions(oxu, temp.qh);
  1943. if (unlikely(list_empty(&temp.qh->qtd_list)))
  1944. intr_deschedule(oxu, temp.qh);
  1945. qh_put(temp.qh);
  1946. break;
  1947. default:
  1948. oxu_dbg(oxu, "corrupt type %d frame %d shadow %p\n",
  1949. type, frame, q.ptr);
  1950. q.ptr = NULL;
  1951. }
  1952. /* assume completion callbacks modify the queue */
  1953. if (unlikely(modified))
  1954. goto restart;
  1955. }
  1956. /* Stop when we catch up to the HC */
  1957. /* FIXME: this assumes we won't get lapped when
  1958. * latencies climb; that should be rare, but...
  1959. * detect it, and just go all the way around.
  1960. * FLR might help detect this case, so long as latencies
  1961. * don't exceed periodic_size msec (default 1.024 sec).
  1962. */
  1963. /* FIXME: likewise assumes HC doesn't halt mid-scan */
  1964. if (now_uframe == clock) {
  1965. unsigned now;
  1966. if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
  1967. break;
  1968. oxu->next_uframe = now_uframe;
  1969. now = readl(&oxu->regs->frame_index) % mod;
  1970. if (now_uframe == now)
  1971. break;
  1972. /* rescan the rest of this frame, then ... */
  1973. clock = now;
  1974. } else {
  1975. now_uframe++;
  1976. now_uframe %= mod;
  1977. }
  1978. }
  1979. }
  1980. /* On some systems, leaving remote wakeup enabled prevents system shutdown.
  1981. * The firmware seems to think that powering off is a wakeup event!
  1982. * This routine turns off remote wakeup and everything else, on all ports.
  1983. */
  1984. static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
  1985. {
  1986. int port = HCS_N_PORTS(oxu->hcs_params);
  1987. while (port--)
  1988. writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
  1989. }
  1990. static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
  1991. {
  1992. unsigned port;
  1993. if (!HCS_PPC(oxu->hcs_params))
  1994. return;
  1995. oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
  1996. for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; )
  1997. (void) oxu_hub_control(oxu_to_hcd(oxu),
  1998. is_on ? SetPortFeature : ClearPortFeature,
  1999. USB_PORT_FEAT_POWER,
  2000. port--, NULL, 0);
  2001. msleep(20);
  2002. }
  2003. /* Called from some interrupts, timers, and so on.
  2004. * It calls driver completion functions, after dropping oxu->lock.
  2005. */
  2006. static void ehci_work(struct oxu_hcd *oxu)
  2007. {
  2008. timer_action_done(oxu, TIMER_IO_WATCHDOG);
  2009. if (oxu->reclaim_ready)
  2010. end_unlink_async(oxu);
  2011. /* another CPU may drop oxu->lock during a schedule scan while
  2012. * it reports urb completions. this flag guards against bogus
  2013. * attempts at re-entrant schedule scanning.
  2014. */
  2015. if (oxu->scanning)
  2016. return;
  2017. oxu->scanning = 1;
  2018. scan_async(oxu);
  2019. if (oxu->next_uframe != -1)
  2020. scan_periodic(oxu);
  2021. oxu->scanning = 0;
  2022. /* the IO watchdog guards against hardware or driver bugs that
  2023. * misplace IRQs, and should let us run completely without IRQs.
  2024. * such lossage has been observed on both VT6202 and VT8235.
  2025. */
  2026. if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
  2027. (oxu->async->qh_next.ptr != NULL ||
  2028. oxu->periodic_sched != 0))
  2029. timer_action(oxu, TIMER_IO_WATCHDOG);
  2030. }
  2031. static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
  2032. {
  2033. /* if we need to use IAA and it's busy, defer */
  2034. if (qh->qh_state == QH_STATE_LINKED
  2035. && oxu->reclaim
  2036. && HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
  2037. struct ehci_qh *last;
  2038. for (last = oxu->reclaim;
  2039. last->reclaim;
  2040. last = last->reclaim)
  2041. continue;
  2042. qh->qh_state = QH_STATE_UNLINK_WAIT;
  2043. last->reclaim = qh;
  2044. /* bypass IAA if the hc can't care */
  2045. } else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
  2046. end_unlink_async(oxu);
  2047. /* something else might have unlinked the qh by now */
  2048. if (qh->qh_state == QH_STATE_LINKED)
  2049. start_unlink_async(oxu, qh);
  2050. }
  2051. /*
  2052. * USB host controller methods
  2053. */
  2054. static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
  2055. {
  2056. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2057. u32 status, pcd_status = 0;
  2058. int bh;
  2059. spin_lock(&oxu->lock);
  2060. status = readl(&oxu->regs->status);
  2061. /* e.g. cardbus physical eject */
  2062. if (status == ~(u32) 0) {
  2063. oxu_dbg(oxu, "device removed\n");
  2064. goto dead;
  2065. }
  2066. /* Shared IRQ? */
  2067. status &= INTR_MASK;
  2068. if (!status || unlikely(hcd->state == HC_STATE_HALT)) {
  2069. spin_unlock(&oxu->lock);
  2070. return IRQ_NONE;
  2071. }
  2072. /* clear (just) interrupts */
  2073. writel(status, &oxu->regs->status);
  2074. readl(&oxu->regs->command); /* unblock posted write */
  2075. bh = 0;
  2076. #ifdef OXU_VERBOSE_DEBUG
  2077. /* unrequested/ignored: Frame List Rollover */
  2078. dbg_status(oxu, "irq", status);
  2079. #endif
  2080. /* INT, ERR, and IAA interrupt rates can be throttled */
  2081. /* normal [4.15.1.2] or error [4.15.1.1] completion */
  2082. if (likely((status & (STS_INT|STS_ERR)) != 0))
  2083. bh = 1;
  2084. /* complete the unlinking of some qh [4.15.2.3] */
  2085. if (status & STS_IAA) {
  2086. oxu->reclaim_ready = 1;
  2087. bh = 1;
  2088. }
  2089. /* remote wakeup [4.3.1] */
  2090. if (status & STS_PCD) {
  2091. unsigned i = HCS_N_PORTS(oxu->hcs_params);
  2092. pcd_status = status;
  2093. /* resume root hub? */
  2094. if (!(readl(&oxu->regs->command) & CMD_RUN))
  2095. usb_hcd_resume_root_hub(hcd);
  2096. while (i--) {
  2097. int pstatus = readl(&oxu->regs->port_status[i]);
  2098. if (pstatus & PORT_OWNER)
  2099. continue;
  2100. if (!(pstatus & PORT_RESUME)
  2101. || oxu->reset_done[i] != 0)
  2102. continue;
  2103. /* start USB_RESUME_TIMEOUT resume signaling from this
  2104. * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
  2105. * stop that signaling.
  2106. */
  2107. oxu->reset_done[i] = jiffies +
  2108. msecs_to_jiffies(USB_RESUME_TIMEOUT);
  2109. oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
  2110. mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
  2111. }
  2112. }
  2113. /* PCI errors [4.15.2.4] */
  2114. if (unlikely((status & STS_FATAL) != 0)) {
  2115. /* bogus "fatal" IRQs appear on some chips... why? */
  2116. status = readl(&oxu->regs->status);
  2117. dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
  2118. dbg_status(oxu, "fatal", status);
  2119. if (status & STS_HALT) {
  2120. oxu_err(oxu, "fatal error\n");
  2121. dead:
  2122. ehci_reset(oxu);
  2123. writel(0, &oxu->regs->configured_flag);
  2124. usb_hc_died(hcd);
  2125. /* generic layer kills/unlinks all urbs, then
  2126. * uses oxu_stop to clean up the rest
  2127. */
  2128. bh = 1;
  2129. }
  2130. }
  2131. if (bh)
  2132. ehci_work(oxu);
  2133. spin_unlock(&oxu->lock);
  2134. if (pcd_status & STS_PCD)
  2135. usb_hcd_poll_rh_status(hcd);
  2136. return IRQ_HANDLED;
  2137. }
  2138. static irqreturn_t oxu_irq(struct usb_hcd *hcd)
  2139. {
  2140. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2141. int ret = IRQ_HANDLED;
  2142. u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
  2143. u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);
2144. /* Disable all interrupts */
  2145. oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);
  2146. if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
  2147. (!oxu->is_otg && (status & OXU_USBSPHI)))
  2148. oxu210_hcd_irq(hcd);
  2149. else
  2150. ret = IRQ_NONE;
2151. /* Restore the previous interrupt enables */
  2152. oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);
  2153. return ret;
  2154. }
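/*
 * Chip-level demux: the OTG and SPH host blocks sit behind one interrupt
 * line, so oxu_irq() masks everything via OXU_CHIPIRQEN_CLR, calls the
 * EHCI-style handler only when the pending bit matches this hcd's block
 * (OXU_USBOTGI vs. OXU_USBSPHI), and then restores the saved enable mask.
 */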
  2155. static void oxu_watchdog(unsigned long param)
  2156. {
  2157. struct oxu_hcd *oxu = (struct oxu_hcd *) param;
  2158. unsigned long flags;
  2159. spin_lock_irqsave(&oxu->lock, flags);
  2160. /* lost IAA irqs wedge things badly; seen with a vt8235 */
  2161. if (oxu->reclaim) {
  2162. u32 status = readl(&oxu->regs->status);
  2163. if (status & STS_IAA) {
  2164. oxu_vdbg(oxu, "lost IAA\n");
  2165. writel(STS_IAA, &oxu->regs->status);
  2166. oxu->reclaim_ready = 1;
  2167. }
  2168. }
  2169. /* stop async processing after it's idled a bit */
  2170. if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
  2171. start_unlink_async(oxu, oxu->async);
  2172. /* oxu could run by timer, without IRQs ... */
  2173. ehci_work(oxu);
  2174. spin_unlock_irqrestore(&oxu->lock, flags);
  2175. }
  2176. /* One-time init, only for memory state.
  2177. */
  2178. static int oxu_hcd_init(struct usb_hcd *hcd)
  2179. {
  2180. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2181. u32 temp;
  2182. int retval;
  2183. u32 hcc_params;
  2184. spin_lock_init(&oxu->lock);
  2185. setup_timer(&oxu->watchdog, oxu_watchdog, (unsigned long)oxu);
  2186. /*
  2187. * hw default: 1K periodic list heads, one per frame.
  2188. * periodic_size can shrink by USBCMD update if hcc_params allows.
  2189. */
  2190. oxu->periodic_size = DEFAULT_I_TDPS;
  2191. retval = ehci_mem_init(oxu, GFP_KERNEL);
  2192. if (retval < 0)
  2193. return retval;
  2194. /* controllers may cache some of the periodic schedule ... */
  2195. hcc_params = readl(&oxu->caps->hcc_params);
  2196. if (HCC_ISOC_CACHE(hcc_params)) /* full frame cache */
  2197. oxu->i_thresh = 8;
  2198. else /* N microframes cached */
  2199. oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
  2200. oxu->reclaim = NULL;
  2201. oxu->reclaim_ready = 0;
  2202. oxu->next_uframe = -1;
  2203. /*
  2204. * dedicate a qh for the async ring head, since we couldn't unlink
  2205. * a 'real' qh without stopping the async schedule [4.8]. use it
  2206. * as the 'reclamation list head' too.
  2207. * its dummy is used in hw_alt_next of many tds, to prevent the qh
  2208. * from automatically advancing to the next td after short reads.
  2209. */
  2210. oxu->async->qh_next.qh = NULL;
  2211. oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
  2212. oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
  2213. oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
  2214. oxu->async->hw_qtd_next = EHCI_LIST_END;
  2215. oxu->async->qh_state = QH_STATE_LINKED;
  2216. oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);
  2217. /* clear interrupt enables, set irq latency */
  2218. if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
  2219. log2_irq_thresh = 0;
  2220. temp = 1 << (16 + log2_irq_thresh);
  2221. if (HCC_CANPARK(hcc_params)) {
  2222. /* HW default park == 3, on hardware that supports it (like
  2223. * NVidia and ALI silicon), maximizes throughput on the async
  2224. * schedule by avoiding QH fetches between transfers.
  2225. *
  2226. * With fast usb storage devices and NForce2, "park" seems to
  2227. * make problems: throughput reduction (!), data errors...
  2228. */
  2229. if (park) {
  2230. park = min(park, (unsigned) 3);
  2231. temp |= CMD_PARK;
  2232. temp |= park << 8;
  2233. }
  2234. oxu_dbg(oxu, "park %d\n", park);
  2235. }
  2236. if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
  2237. /* periodic schedule size can be smaller than default */
  2238. temp &= ~(3 << 2);
  2239. temp |= (EHCI_TUNE_FLS << 2);
  2240. }
  2241. oxu->command = temp;
  2242. return 0;
  2243. }
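/*
 * Example (value assumed): log2_irq_thresh == 3 makes temp = 1 << 19,
 * i.e. 8 in the USBCMD interrupt threshold field (bits 23:16), so
 * completion interrupts are batched to at most one per 8 microframes
 * (1 ms); out-of-range module parameter values fall back to the
 * 1-microframe default.
 */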
  2244. /* Called during probe() after chip reset completes.
  2245. */
  2246. static int oxu_reset(struct usb_hcd *hcd)
  2247. {
  2248. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2249. int ret;
  2250. spin_lock_init(&oxu->mem_lock);
  2251. INIT_LIST_HEAD(&oxu->urb_list);
  2252. oxu->urb_len = 0;
2253. /* FIXME */
  2254. hcd->self.controller->dma_mask = NULL;
  2255. if (oxu->is_otg) {
  2256. oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET;
  2257. oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \
  2258. HC_LENGTH(readl(&oxu->caps->hc_capbase));
  2259. oxu->mem = hcd->regs + OXU_SPH_MEM;
  2260. } else {
  2261. oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET;
  2262. oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \
  2263. HC_LENGTH(readl(&oxu->caps->hc_capbase));
  2264. oxu->mem = hcd->regs + OXU_OTG_MEM;
  2265. }
  2266. oxu->hcs_params = readl(&oxu->caps->hcs_params);
  2267. oxu->sbrn = 0x20;
  2268. ret = oxu_hcd_init(hcd);
  2269. if (ret)
  2270. return ret;
  2271. return 0;
  2272. }
  2273. static int oxu_run(struct usb_hcd *hcd)
  2274. {
  2275. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2276. int retval;
  2277. u32 temp, hcc_params;
  2278. hcd->uses_new_polling = 1;
  2279. /* EHCI spec section 4.1 */
  2280. retval = ehci_reset(oxu);
  2281. if (retval != 0) {
  2282. ehci_mem_cleanup(oxu);
  2283. return retval;
  2284. }
  2285. writel(oxu->periodic_dma, &oxu->regs->frame_list);
  2286. writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);
  2287. /* hcc_params controls whether oxu->regs->segment must (!!!)
  2288. * be used; it constrains QH/ITD/SITD and QTD locations.
  2289. * pci_pool consistent memory always uses segment zero.
  2290. * streaming mappings for I/O buffers, like pci_map_single(),
  2291. * can return segments above 4GB, if the device allows.
  2292. *
  2293. * NOTE: the dma mask is visible through dma_supported(), so
  2294. * drivers can pass this info along ... like NETIF_F_HIGHDMA,
  2295. * Scsi_Host.highmem_io, and so forth. It's readonly to all
  2296. * host side drivers though.
  2297. */
  2298. hcc_params = readl(&oxu->caps->hcc_params);
  2299. if (HCC_64BIT_ADDR(hcc_params))
  2300. writel(0, &oxu->regs->segment);
  2301. oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
  2302. CMD_ASE | CMD_RESET);
  2303. oxu->command |= CMD_RUN;
  2304. writel(oxu->command, &oxu->regs->command);
  2305. dbg_cmd(oxu, "init", oxu->command);
  2306. /*
  2307. * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
  2308. * are explicitly handed to companion controller(s), so no TT is
  2309. * involved with the root hub. (Except where one is integrated,
  2310. * and there's no companion controller unless maybe for USB OTG.)
  2311. */
  2312. hcd->state = HC_STATE_RUNNING;
  2313. writel(FLAG_CF, &oxu->regs->configured_flag);
  2314. readl(&oxu->regs->command); /* unblock posted writes */
  2315. temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
  2316. oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
  2317. ((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
  2318. temp >> 8, temp & 0xff, DRIVER_VERSION,
  2319. ignore_oc ? ", overcurrent ignored" : "");
  2320. writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */
  2321. return 0;
  2322. }
  2323. static void oxu_stop(struct usb_hcd *hcd)
  2324. {
  2325. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2326. /* Turn off port power on all root hub ports. */
  2327. ehci_port_power(oxu, 0);
  2328. /* no more interrupts ... */
  2329. del_timer_sync(&oxu->watchdog);
  2330. spin_lock_irq(&oxu->lock);
  2331. if (HC_IS_RUNNING(hcd->state))
  2332. ehci_quiesce(oxu);
  2333. ehci_reset(oxu);
  2334. writel(0, &oxu->regs->intr_enable);
  2335. spin_unlock_irq(&oxu->lock);
  2336. /* let companion controllers work when we aren't */
  2337. writel(0, &oxu->regs->configured_flag);
  2338. /* root hub is shut down separately (first, when possible) */
  2339. spin_lock_irq(&oxu->lock);
  2340. if (oxu->async)
  2341. ehci_work(oxu);
  2342. spin_unlock_irq(&oxu->lock);
  2343. ehci_mem_cleanup(oxu);
  2344. dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
  2345. }
2346. /* Kicks in for silicon on any bus (not just PCI).
  2347. * This forcibly disables dma and IRQs, helping kexec and other cases
  2348. * where the next system software may expect clean state.
  2349. */
  2350. static void oxu_shutdown(struct usb_hcd *hcd)
  2351. {
  2352. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2353. (void) ehci_halt(oxu);
  2354. ehci_turn_off_all_ports(oxu);
  2355. /* make BIOS/etc use companion controller during reboot */
  2356. writel(0, &oxu->regs->configured_flag);
  2357. /* unblock posted writes */
  2358. readl(&oxu->regs->configured_flag);
  2359. }
2360. /* Non-error returns are a promise to giveback() the urb later;
2361. * we drop ownership so the next owner (or urb unlink) can get it.
  2362. *
  2363. * urb + dev is in hcd.self.controller.urb_list
  2364. * we're queueing TDs onto software and hardware lists
  2365. *
  2366. * hcd-specific init for hcpriv hasn't been done yet
  2367. *
  2368. * NOTE: control, bulk, and interrupt share the same code to append TDs
  2369. * to a (possibly active) QH, and the same QH scanning code.
  2370. */
  2371. static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
  2372. gfp_t mem_flags)
  2373. {
  2374. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2375. struct list_head qtd_list;
  2376. INIT_LIST_HEAD(&qtd_list);
  2377. switch (usb_pipetype(urb->pipe)) {
  2378. case PIPE_CONTROL:
  2379. case PIPE_BULK:
  2380. default:
  2381. if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
  2382. return -ENOMEM;
  2383. return submit_async(oxu, urb, &qtd_list, mem_flags);
  2384. case PIPE_INTERRUPT:
  2385. if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
  2386. return -ENOMEM;
  2387. return intr_submit(oxu, urb, &qtd_list, mem_flags);
  2388. case PIPE_ISOCHRONOUS:
  2389. if (urb->dev->speed == USB_SPEED_HIGH)
  2390. return itd_submit(oxu, urb, mem_flags);
  2391. else
  2392. return sitd_submit(oxu, urb, mem_flags);
  2393. }
  2394. }
2395. /* This function breaks up URBs with large transfer buffers into
2396. * smaller chunks and processes the resulting micro urbs in sequence.
  2397. */
  2398. static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
  2399. gfp_t mem_flags)
  2400. {
  2401. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2402. int num, rem;
  2403. int transfer_buffer_length;
  2404. void *transfer_buffer;
  2405. struct urb *murb;
  2406. int i, ret;
  2407. /* If not bulk pipe just enqueue the URB */
  2408. if (!usb_pipebulk(urb->pipe))
  2409. return __oxu_urb_enqueue(hcd, urb, mem_flags);
  2410. /* Otherwise we should verify the USB transfer buffer size! */
  2411. transfer_buffer = urb->transfer_buffer;
  2412. transfer_buffer_length = urb->transfer_buffer_length;
  2413. num = urb->transfer_buffer_length / 4096;
  2414. rem = urb->transfer_buffer_length % 4096;
  2415. if (rem != 0)
  2416. num++;
  2417. /* If URB is smaller than 4096 bytes just enqueue it! */
  2418. if (num == 1)
  2419. return __oxu_urb_enqueue(hcd, urb, mem_flags);
2420. /* Ok, we have more work to do! :) */
  2421. for (i = 0; i < num - 1; i++) {
2422. /* Get a free micro URB; poll until one becomes available */
  2423. do {
  2424. murb = (struct urb *) oxu_murb_alloc(oxu);
  2425. if (!murb)
  2426. schedule();
  2427. } while (!murb);
2428. /* Copying the urb */
  2429. memcpy(murb, urb, sizeof(struct urb));
  2430. murb->transfer_buffer_length = 4096;
  2431. murb->transfer_buffer = transfer_buffer + i * 4096;
2432. /* A NULL complete pointer encodes that this is a micro urb */
  2433. murb->complete = NULL;
  2434. ((struct oxu_murb *) murb)->main = urb;
  2435. ((struct oxu_murb *) murb)->last = 0;
2436. /* This loop guarantees the urb gets processed by retrying
2437. * whenever resources are temporarily unavailable.
  2438. */
  2439. do {
  2440. ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
  2441. if (ret)
  2442. schedule();
  2443. } while (ret);
  2444. }
  2445. /* Last urb requires special handling */
2446. /* Get a free micro URB; poll until one becomes available */
  2447. do {
  2448. murb = (struct urb *) oxu_murb_alloc(oxu);
  2449. if (!murb)
  2450. schedule();
  2451. } while (!murb);
2452. /* Copying the urb */
  2453. memcpy(murb, urb, sizeof(struct urb));
  2454. murb->transfer_buffer_length = rem > 0 ? rem : 4096;
  2455. murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;
2456. /* A NULL complete pointer encodes that this is a micro urb */
  2457. murb->complete = NULL;
  2458. ((struct oxu_murb *) murb)->main = urb;
  2459. ((struct oxu_murb *) murb)->last = 1;
  2460. do {
  2461. ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
  2462. if (ret)
  2463. schedule();
  2464. } while (ret);
  2465. return ret;
  2466. }
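/*
 * Split example (length assumed): a 10000 byte bulk transfer gives
 * num = 2, rem = 1808, so num is bumped to 3 and the urb is sent as micro
 * urbs of 4096, 4096 and 1808 bytes.  Only the last one has ->last set,
 * so the completion path can tell when the original urb is finally done.
 */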
  2467. /* Remove from hardware lists.
  2468. * Completions normally happen asynchronously
  2469. */
  2470. static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
  2471. {
  2472. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2473. struct ehci_qh *qh;
  2474. unsigned long flags;
  2475. spin_lock_irqsave(&oxu->lock, flags);
  2476. switch (usb_pipetype(urb->pipe)) {
  2477. case PIPE_CONTROL:
  2478. case PIPE_BULK:
  2479. default:
  2480. qh = (struct ehci_qh *) urb->hcpriv;
  2481. if (!qh)
  2482. break;
  2483. unlink_async(oxu, qh);
  2484. break;
  2485. case PIPE_INTERRUPT:
  2486. qh = (struct ehci_qh *) urb->hcpriv;
  2487. if (!qh)
  2488. break;
  2489. switch (qh->qh_state) {
  2490. case QH_STATE_LINKED:
  2491. intr_deschedule(oxu, qh);
  2492. /* FALL THROUGH */
  2493. case QH_STATE_IDLE:
  2494. qh_completions(oxu, qh);
  2495. break;
  2496. default:
  2497. oxu_dbg(oxu, "bogus qh %p state %d\n",
  2498. qh, qh->qh_state);
  2499. goto done;
  2500. }
  2501. /* reschedule QH iff another request is queued */
  2502. if (!list_empty(&qh->qtd_list)
  2503. && HC_IS_RUNNING(hcd->state)) {
  2504. int status;
  2505. status = qh_schedule(oxu, qh);
  2506. spin_unlock_irqrestore(&oxu->lock, flags);
  2507. if (status != 0) {
  2508. /* shouldn't happen often, but ...
  2509. * FIXME kill those tds' urbs
  2510. */
  2511. dev_err(hcd->self.controller,
  2512. "can't reschedule qh %p, err %d\n", qh,
  2513. status);
  2514. }
  2515. return status;
  2516. }
  2517. break;
  2518. }
  2519. done:
  2520. spin_unlock_irqrestore(&oxu->lock, flags);
  2521. return 0;
  2522. }
  2523. /* Bulk qh holds the data toggle */
  2524. static void oxu_endpoint_disable(struct usb_hcd *hcd,
  2525. struct usb_host_endpoint *ep)
  2526. {
  2527. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2528. unsigned long flags;
  2529. struct ehci_qh *qh, *tmp;
  2530. /* ASSERT: any requests/urbs are being unlinked */
  2531. /* ASSERT: nobody can be submitting urbs for this any more */
  2532. rescan:
  2533. spin_lock_irqsave(&oxu->lock, flags);
  2534. qh = ep->hcpriv;
  2535. if (!qh)
  2536. goto done;
  2537. /* endpoints can be iso streams. for now, we don't
  2538. * accelerate iso completions ... so spin a while.
  2539. */
  2540. if (qh->hw_info1 == 0) {
  2541. oxu_vdbg(oxu, "iso delay\n");
  2542. goto idle_timeout;
  2543. }
  2544. if (!HC_IS_RUNNING(hcd->state))
  2545. qh->qh_state = QH_STATE_IDLE;
  2546. switch (qh->qh_state) {
  2547. case QH_STATE_LINKED:
  2548. for (tmp = oxu->async->qh_next.qh;
  2549. tmp && tmp != qh;
  2550. tmp = tmp->qh_next.qh)
  2551. continue;
  2552. /* periodic qh self-unlinks on empty */
  2553. if (!tmp)
  2554. goto nogood;
  2555. unlink_async(oxu, qh);
  2556. /* FALL THROUGH */
  2557. case QH_STATE_UNLINK: /* wait for hw to finish? */
  2558. idle_timeout:
  2559. spin_unlock_irqrestore(&oxu->lock, flags);
  2560. schedule_timeout_uninterruptible(1);
  2561. goto rescan;
  2562. case QH_STATE_IDLE: /* fully unlinked */
  2563. if (list_empty(&qh->qtd_list)) {
  2564. qh_put(qh);
  2565. break;
  2566. }
  2567. /* else FALL THROUGH */
  2568. default:
  2569. nogood:
  2570. /* caller was supposed to have unlinked any requests;
  2571. * that's not our job. just leak this memory.
  2572. */
  2573. oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
  2574. qh, ep->desc.bEndpointAddress, qh->qh_state,
  2575. list_empty(&qh->qtd_list) ? "" : "(has tds)");
  2576. break;
  2577. }
  2578. ep->hcpriv = NULL;
  2579. done:
  2580. spin_unlock_irqrestore(&oxu->lock, flags);
  2581. }
  2582. static int oxu_get_frame(struct usb_hcd *hcd)
  2583. {
  2584. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2585. return (readl(&oxu->regs->frame_index) >> 3) %
  2586. oxu->periodic_size;
  2587. }
  2588. /* Build "status change" packet (one or two bytes) from HC registers */
  2589. static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
  2590. {
  2591. struct oxu_hcd *oxu = hcd_to_oxu(hcd);
  2592. u32 temp, mask, status = 0;
  2593. int ports, i, retval = 1;
  2594. unsigned long flags;
  2595. /* if !PM, root hub timers won't get shut down ... */
  2596. if (!HC_IS_RUNNING(hcd->state))
  2597. return 0;
  2598. /* init status to no-changes */
  2599. buf[0] = 0;
  2600. ports = HCS_N_PORTS(oxu->hcs_params);
  2601. if (ports > 7) {
  2602. buf[1] = 0;
  2603. retval++;
  2604. }
  2605. /* Some boards (mostly VIA?) report bogus overcurrent indications,
  2606. * causing massive log spam unless we completely ignore them. It
  2607. * may be relevant that VIA VT8235 controllers, where PORT_POWER is
  2608. * always set, seem to clear PORT_OCC and PORT_CSC when writing to
  2609. * PORT_POWER; that's surprising, but maybe within-spec.
  2610. */
  2611. if (!ignore_oc)
  2612. mask = PORT_CSC | PORT_PEC | PORT_OCC;
  2613. else
  2614. mask = PORT_CSC | PORT_PEC;
  2615. /* no hub change reports (bit 0) for now (power, ...) */
  2616. /* port N changes (bit N)? */
  2617. spin_lock_irqsave(&oxu->lock, flags);
  2618. for (i = 0; i < ports; i++) {
  2619. temp = readl(&oxu->regs->port_status[i]);
  2620. /*
  2621. * Return status information even for ports with OWNER set.
  2622. * Otherwise hub_wq wouldn't see the disconnect event when a
  2623. * high-speed device is switched over to the companion
  2624. * controller by the user.
  2625. */
  2626. if (!(temp & PORT_CONNECT))
  2627. oxu->reset_done[i] = 0;
  2628. if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
  2629. time_after_eq(jiffies, oxu->reset_done[i]))) {
  2630. if (i < 7)
  2631. buf[0] |= 1 << (i + 1);
  2632. else
  2633. buf[1] |= 1 << (i - 7);
  2634. status = STS_PCD;
  2635. }
  2636. }
  2637. /* FIXME autosuspend idle root hubs */
  2638. spin_unlock_irqrestore(&oxu->lock, flags);
  2639. return status ? retval : 0;
  2640. }
  2641. /* Returns the speed of a device attached to a port on the root hub. */
  2642. static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
  2643. unsigned int portsc)
  2644. {
  2645. switch ((portsc >> 26) & 3) {
  2646. case 0:
  2647. return 0;
  2648. case 1:
  2649. return USB_PORT_STAT_LOW_SPEED;
  2650. case 2:
  2651. default:
  2652. return USB_PORT_STAT_HIGH_SPEED;
  2653. }
  2654. }
  2655. #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
				u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int ports = HCS_N_PORTS(oxu->hcs_params);
	u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
	u32 temp, status;
	unsigned long flags;
	int retval = 0;
	unsigned selector;

	/*
	 * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
	 * (track current state ourselves) ... blink for diagnostics,
	 * power, "this is the one", etc. EHCI spec supports this.
	 */

	spin_lock_irqsave(&oxu->lock, flags);
	switch (typeReq) {
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = readl(status_reg);

		/*
		 * Even if OWNER is set, so the port is owned by the
		 * companion controller, hub_wq needs to be able to clear
		 * the port-change status bits (especially
		 * USB_PORT_STAT_C_CONNECTION).
		 */

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			writel(temp & ~PORT_PE, status_reg);
			break;
		case USB_PORT_FEAT_C_ENABLE:
			writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
			break;
		case USB_PORT_FEAT_SUSPEND:
			if (temp & PORT_RESET)
				goto error;
			if (temp & PORT_SUSPEND) {
				if ((temp & PORT_PE) == 0)
					goto error;
				/* resume signaling for 20 msec */
				temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
				writel(temp | PORT_RESUME, status_reg);
				oxu->reset_done[wIndex] = jiffies
						+ msecs_to_jiffies(20);
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			/* we auto-clear this feature */
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(oxu->hcs_params))
				writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
					status_reg);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
			break;
		case USB_PORT_FEAT_C_RESET:
			/* GetPortStatus clears reset */
			break;
		default:
			goto error;
		}
		readl(&oxu->regs->command);	/* unblock posted write */
		break;
	case GetHubDescriptor:
		ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
			buf);
		break;
	case GetHubStatus:
		/* no hub-wide feature/status flags */
		memset(buf, 0, 4);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		status = 0;
		temp = readl(status_reg);

		/* wPortChange bits */
		if (temp & PORT_CSC)
			status |= USB_PORT_STAT_C_CONNECTION << 16;
		if (temp & PORT_PEC)
			status |= USB_PORT_STAT_C_ENABLE << 16;
		if ((temp & PORT_OCC) && !ignore_oc)
			status |= USB_PORT_STAT_C_OVERCURRENT << 16;

		/* whoever resumes must GetPortStatus to complete it!! */
		if (temp & PORT_RESUME) {

			/* Remote Wakeup received? */
			if (!oxu->reset_done[wIndex]) {
				/* resume signaling for 20 msec */
				oxu->reset_done[wIndex] = jiffies
						+ msecs_to_jiffies(20);
				/* check the port again */
				mod_timer(&oxu_to_hcd(oxu)->rh_timer,
						oxu->reset_done[wIndex]);
			}

			/* resume completed? */
			else if (time_after_eq(jiffies,
					oxu->reset_done[wIndex])) {
				status |= USB_PORT_STAT_C_SUSPEND << 16;
				oxu->reset_done[wIndex] = 0;

				/* stop resume signaling */
				temp = readl(status_reg);
				writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
					status_reg);
				retval = handshake(oxu, status_reg,
					PORT_RESUME, 0, 2000 /* 2msec */);
				if (retval != 0) {
					oxu_err(oxu,
						"port %d resume error %d\n",
						wIndex + 1, retval);
					goto error;
				}
				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
			}
		}

		/* whoever resets must GetPortStatus to complete it!! */
		if ((temp & PORT_RESET)
				&& time_after_eq(jiffies,
					oxu->reset_done[wIndex])) {
			status |= USB_PORT_STAT_C_RESET << 16;
			oxu->reset_done[wIndex] = 0;

			/* force reset to complete */
			writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
					status_reg);
			/* REVISIT: some hardware needs 550+ usec to clear
			 * this bit; seems too long to spin routinely...
			 */
			retval = handshake(oxu, status_reg,
					PORT_RESET, 0, 750);
			if (retval != 0) {
				oxu_err(oxu, "port %d reset error %d\n",
					wIndex + 1, retval);
				goto error;
			}

			/* see what we found out */
			temp = check_reset_complete(oxu, wIndex, status_reg,
					readl(status_reg));
		}

		/* transfer dedicated ports to the companion hc */
		if ((temp & PORT_CONNECT) &&
				test_bit(wIndex, &oxu->companion_ports)) {
			temp &= ~PORT_RWC_BITS;
			temp |= PORT_OWNER;
			writel(temp, status_reg);
			oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
			temp = readl(status_reg);
		}

		/*
		 * Even if OWNER is set, there's no harm letting hub_wq
		 * see the wPortStatus values (they should all be 0 except
		 * for PORT_POWER anyway).
		 */

		if (temp & PORT_CONNECT) {
			status |= USB_PORT_STAT_CONNECTION;
			/* status may be from integrated TT */
			status |= oxu_port_speed(oxu, temp);
		}
		if (temp & PORT_PE)
			status |= USB_PORT_STAT_ENABLE;
		if (temp & (PORT_SUSPEND|PORT_RESUME))
			status |= USB_PORT_STAT_SUSPEND;
		if (temp & PORT_OC)
			status |= USB_PORT_STAT_OVERCURRENT;
		if (temp & PORT_RESET)
			status |= USB_PORT_STAT_RESET;
		if (temp & PORT_POWER)
			status |= USB_PORT_STAT_POWER;

#ifndef	OXU_VERBOSE_DEBUG
	if (status & ~0xffff)	/* only if wPortChange is interesting */
#endif
		dbg_port(oxu, "GetStatus", wIndex + 1, temp);
		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
		break;
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		selector = wIndex >> 8;
		wIndex &= 0xff;
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = readl(status_reg);
		if (temp & PORT_OWNER)
			break;

		temp &= ~PORT_RWC_BITS;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			if ((temp & PORT_PE) == 0
					|| (temp & PORT_RESET) != 0)
				goto error;
			if (device_may_wakeup(&hcd->self.root_hub->dev))
				temp |= PORT_WAKE_BITS;
			writel(temp | PORT_SUSPEND, status_reg);
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(oxu->hcs_params))
				writel(temp | PORT_POWER, status_reg);
			break;
		case USB_PORT_FEAT_RESET:
			if (temp & PORT_RESUME)
				goto error;
			/* line status bits may report this as low speed,
			 * which can be fine if this root hub has a
			 * transaction translator built in.
			 */
			oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
			temp |= PORT_RESET;
			temp &= ~PORT_PE;

			/*
			 * caller must wait, then call GetPortStatus
			 * usb 2.0 spec says 50 ms resets on root
			 */
			oxu->reset_done[wIndex] = jiffies
					+ msecs_to_jiffies(50);
			writel(temp, status_reg);
			break;

		/* For downstream facing ports (these): one hub port is put
		 * into test mode according to USB2 11.24.2.13, then the hub
		 * must be reset (which for root hub now means rmmod+modprobe,
		 * or else system reboot). See EHCI 2.3.9 and 4.14 for info
		 * about the EHCI-specific stuff.
		 */
		case USB_PORT_FEAT_TEST:
			if (!selector || selector > 5)
				goto error;
			ehci_quiesce(oxu);
			ehci_halt(oxu);
			temp |= selector << 16;
			writel(temp, status_reg);
			break;

		default:
			goto error;
		}
		readl(&oxu->regs->command);	/* unblock posted writes */
		break;

	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&oxu->lock, flags);
	return retval;
}

#ifdef CONFIG_PM
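
/*
 * Suspend the root hub: quiesce and halt the controller, manually put every
 * enabled, owned, not-yet-suspended port into suspend (recording them in
 * oxu->bus_suspended so bus_resume() can undo it), and mask the port-change
 * interrupt unless remote wakeup is allowed.
 */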
static int oxu_bus_suspend(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int port;
	int mask;

	oxu_dbg(oxu, "suspend root hub\n");

	if (time_before(jiffies, oxu->next_statechange))
		msleep(5);

	port = HCS_N_PORTS(oxu->hcs_params);
	spin_lock_irq(&oxu->lock);

	/* stop schedules, clean any completed work */
	if (HC_IS_RUNNING(hcd->state)) {
		ehci_quiesce(oxu);
		hcd->state = HC_STATE_QUIESCING;
	}
	oxu->command = readl(&oxu->regs->command);
	if (oxu->reclaim)
		oxu->reclaim_ready = 1;
	ehci_work(oxu);

	/* Unlike other USB host controller types, EHCI doesn't have
	 * any notion of "global" or bus-wide suspend.  The driver has
	 * to manually suspend all the active unsuspended ports, and
	 * then manually resume them in the bus_resume() routine.
	 */
	oxu->bus_suspended = 0;
	while (port--) {
		u32 __iomem *reg = &oxu->regs->port_status[port];
		u32 t1 = readl(reg) & ~PORT_RWC_BITS;
		u32 t2 = t1;

		/* keep track of which ports we suspend */
		if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
				!(t1 & PORT_SUSPEND)) {
			t2 |= PORT_SUSPEND;
			set_bit(port, &oxu->bus_suspended);
		}

		/* enable remote wakeup on all ports */
		if (device_may_wakeup(&hcd->self.root_hub->dev))
			t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
		else
			t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);

		if (t1 != t2) {
			oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
				port + 1, t1, t2);
			writel(t2, reg);
		}
	}

	/* turn off now-idle HC */
	del_timer_sync(&oxu->watchdog);
	ehci_halt(oxu);
	hcd->state = HC_STATE_SUSPENDED;

	/* allow remote wakeup */
	mask = INTR_MASK;
	if (!device_may_wakeup(&hcd->self.root_hub->dev))
		mask &= ~STS_PCD;
	writel(mask, &oxu->regs->intr_enable);
	readl(&oxu->regs->intr_enable);

	oxu->next_statechange = jiffies + msecs_to_jiffies(10);
	spin_unlock_irq(&oxu->lock);
	return 0;
}

/* Caller has locked the root hub, and should reset/reinit on error */
static int oxu_bus_resume(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp;
	int i;

	if (time_before(jiffies, oxu->next_statechange))
		msleep(5);
	spin_lock_irq(&oxu->lock);

	/* Ideally, we have a real resume here and no port's power
	 * was lost.  (For PCI, that means Vaux was maintained.)  But we
	 * could instead be restoring a swsusp snapshot -- so that BIOS was
	 * the last user of the controller, not reset/pm hardware keeping
	 * state we gave to it.
	 */
	temp = readl(&oxu->regs->intr_enable);
	oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");

	/* at least some APM implementations will try to deliver
	 * IRQs right away, so delay them until we're ready.
	 */
	writel(0, &oxu->regs->intr_enable);

	/* re-init operational registers */
	writel(0, &oxu->regs->segment);
	writel(oxu->periodic_dma, &oxu->regs->frame_list);
	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);

	/* restore CMD_RUN, framelist size, and irq threshold */
	writel(oxu->command, &oxu->regs->command);

	/* Some controller/firmware combinations need a delay during which
	 * they set up the port statuses.  See Bugzilla #8190. */
	mdelay(8);

	/* manually resume the ports we suspended during bus_suspend() */
	i = HCS_N_PORTS(oxu->hcs_params);
	while (i--) {
		temp = readl(&oxu->regs->port_status[i]);
		temp &= ~(PORT_RWC_BITS
			| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
			temp |= PORT_RESUME;
		}
		writel(temp, &oxu->regs->port_status[i]);
	}
	i = HCS_N_PORTS(oxu->hcs_params);
	mdelay(20);
	while (i--) {
		temp = readl(&oxu->regs->port_status[i]);
		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
			temp &= ~(PORT_RWC_BITS | PORT_RESUME);
			writel(temp, &oxu->regs->port_status[i]);
			oxu_vdbg(oxu, "resumed port %d\n", i + 1);
		}
	}
	(void) readl(&oxu->regs->command);

	/* maybe re-activate the schedule(s) */
	temp = 0;
	if (oxu->async->qh_next.qh)
		temp |= CMD_ASE;
	if (oxu->periodic_sched)
		temp |= CMD_PSE;
	if (temp) {
		oxu->command |= temp;
		writel(oxu->command, &oxu->regs->command);
	}

	oxu->next_statechange = jiffies + msecs_to_jiffies(5);
	hcd->state = HC_STATE_RUNNING;

	/* Now we can safely re-enable irqs */
	writel(INTR_MASK, &oxu->regs->intr_enable);

	spin_unlock_irq(&oxu->lock);
	return 0;
}

#else

static int oxu_bus_suspend(struct usb_hcd *hcd)
{
	return 0;
}

static int oxu_bus_resume(struct usb_hcd *hcd)
{
	return 0;
}

#endif	/* CONFIG_PM */
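
/* Host controller driver operations handed to the USB core by oxu_create() */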
static const struct hc_driver oxu_hc_driver = {
	.description =		"oxu210hp_hcd",
	.product_desc =		"oxu210hp HCD",
	.hcd_priv_size =	sizeof(struct oxu_hcd),

	/*
	 * Generic hardware linkage
	 */
	.irq =			oxu_irq,
	.flags =		HCD_MEMORY | HCD_USB2,

	/*
	 * Basic lifecycle operations
	 */
	.reset =		oxu_reset,
	.start =		oxu_run,
	.stop =			oxu_stop,
	.shutdown =		oxu_shutdown,

	/*
	 * Managing i/o requests and associated device resources
	 */
	.urb_enqueue =		oxu_urb_enqueue,
	.urb_dequeue =		oxu_urb_dequeue,
	.endpoint_disable =	oxu_endpoint_disable,

	/*
	 * Scheduling support
	 */
	.get_frame_number =	oxu_get_frame,

	/*
	 * Root hub support
	 */
	.hub_status_data =	oxu_hub_status_data,
	.hub_control =		oxu_hub_control,
	.bus_suspend =		oxu_bus_suspend,
	.bus_resume =		oxu_bus_resume,
};

/*
 * Module stuff
 */
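
/*
 * One-time top-level chip setup at probe: soft reset, host interface,
 * burst-read and clock configuration, then clear all top-level interrupt
 * status and enable only the OTG/SPH wake-up interrupts.
 */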
static void oxu_configuration(struct platform_device *pdev, void *base)
{
	u32 tmp;

	/* Initialize top level registers.
	 * First write ever
	 */
	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
	oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);

	tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
	oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);

	oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
					OXU_COMPARATOR | OXU_ASO_OP);

	tmp = oxu_readl(base, OXU_CLKCTRL_SET);
	oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);

	/* Clear all top interrupt enable */
	oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);

	/* Clear all top interrupt status */
	oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);

	/* Enable all needed top interrupt except OTG SPH core */
	oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
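
/*
 * Read OXU_DEVICEID and verify the revision field matches OXU_REV_2100;
 * on success, log the package variant and major/minor revision.
 */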
static int oxu_verify_id(struct platform_device *pdev, void *base)
{
	u32 id;
	static const char * const bo[] = {
		"reserved",
		"128-pin LQFP",
		"84-pin TFBGA",
		"reserved",
	};

	/* Read controller signature register to find a match */
	id = oxu_readl(base, OXU_DEVICEID);
	dev_info(&pdev->dev, "device ID %x\n", id);
	if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
		return -1;

	dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
		id >> OXU_REV_SHIFT,
		bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
		(id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
		(id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);

	return 0;
}

static const struct hc_driver oxu_hc_driver;
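
/*
 * Allocate and register one usb_hcd on the shared register window.  otg
 * selects which core is used (OTG vs. SPH); the chosen core is switched to
 * little-endian, host-only mode before usb_add_hcd() is called.
 */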
static struct usb_hcd *oxu_create(struct platform_device *pdev,
				unsigned long memstart, unsigned long memlen,
				void *base, int irq, int otg)
{
	struct device *dev = &pdev->dev;

	struct usb_hcd *hcd;
	struct oxu_hcd *oxu;
	int ret;

	/* Set endian mode and host mode */
	oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
				OXU_USBMODE,
				OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);

	hcd = usb_create_hcd(&oxu_hc_driver, dev,
				otg ? "oxu210hp_otg" : "oxu210hp_sph");
	if (!hcd)
		return ERR_PTR(-ENOMEM);

	hcd->rsrc_start = memstart;
	hcd->rsrc_len = memlen;
	hcd->regs = base;
	hcd->irq = irq;
	hcd->state = HC_STATE_HALT;

	oxu = hcd_to_oxu(hcd);
	oxu->is_otg = otg;

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret < 0) {
		/* drop the reference taken by usb_create_hcd() */
		usb_put_hcd(hcd);
		return ERR_PTR(ret);
	}

	device_wakeup_enable(hcd->self.controller);
	return hcd;
}
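
/*
 * Bring up the whole device at probe time: configure the chip, verify its
 * ID, create the OTG and SPH host controllers, then enable their top-level
 * interrupts.
 */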
static int oxu_init(struct platform_device *pdev,
		unsigned long memstart, unsigned long memlen,
		void *base, int irq)
{
	struct oxu_info *info = platform_get_drvdata(pdev);
	struct usb_hcd *hcd;
	int ret;

	/* First time configuration at start up */
	oxu_configuration(pdev, base);

	ret = oxu_verify_id(pdev, base);
	if (ret) {
		dev_err(&pdev->dev, "no devices found!\n");
		return -ENODEV;
	}

	/* Create the OTG controller */
	hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
	if (IS_ERR(hcd)) {
		dev_err(&pdev->dev, "cannot create OTG controller!\n");
		ret = PTR_ERR(hcd);
		goto error_create_otg;
	}
	info->hcd[0] = hcd;

	/* Create the SPH host controller */
	hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
	if (IS_ERR(hcd)) {
		dev_err(&pdev->dev, "cannot create SPH controller!\n");
		ret = PTR_ERR(hcd);
		goto error_create_sph;
	}
	info->hcd[1] = hcd;

	oxu_writel(base, OXU_CHIPIRQEN_SET,
		oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);

	return 0;

error_create_sph:
	usb_remove_hcd(info->hcd[0]);
	usb_put_hcd(info->hcd[0]);

error_create_otg:
	return ret;
}
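
/*
 * Platform bus glue: obtain the IRQ and memory resources, map the register
 * window, set the IRQ to trigger on falling edges, allocate the shared
 * oxu_info and initialize both controllers via oxu_init().
 */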
static int oxu_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	void *base;
	unsigned long memstart, memlen;
	int irq, ret;
	struct oxu_info *info;

	if (usb_disabled())
		return -ENODEV;

	/*
	 * Get the platform resources
	 */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev,
			"no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
		return -ENODEV;
	}
	irq = res->start;
	dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto error;
	}
	memstart = res->start;
	memlen = resource_size(res);

	ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
	if (ret) {
		dev_err(&pdev->dev, "error setting irq type\n");
		ret = -EFAULT;
		goto error;
	}

	/* Allocate a driver data struct to hold useful info for both
	 * SPH & OTG devices
	 */
	info = devm_kzalloc(&pdev->dev, sizeof(struct oxu_info), GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto error;
	}

	platform_set_drvdata(pdev, info);

	ret = oxu_init(pdev, memstart, memlen, base, irq);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "cannot init USB devices\n");
		goto error;
	}

	dev_info(&pdev->dev, "devices enabled and running\n");

	return 0;

error:
	dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
	return ret;
}
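
/* Tear-down helpers: oxu_drv_remove() drops both HCDs created at probe time */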
static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
{
	usb_remove_hcd(hcd);
	usb_put_hcd(hcd);
}

static int oxu_drv_remove(struct platform_device *pdev)
{
	struct oxu_info *info = platform_get_drvdata(pdev);

	oxu_remove(pdev, info->hcd[0]);
	oxu_remove(pdev, info->hcd[1]);

	return 0;
}

static void oxu_drv_shutdown(struct platform_device *pdev)
{
	oxu_drv_remove(pdev);
}

#if 0
/* FIXME: TODO */
static int oxu_drv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	return 0;
}

static int oxu_drv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	return 0;
}
#else
#define oxu_drv_suspend	NULL
#define oxu_drv_resume	NULL
#endif

static struct platform_driver oxu_driver = {
	.probe = oxu_drv_probe,
	.remove = oxu_drv_remove,
	.shutdown = oxu_drv_shutdown,
	.suspend = oxu_drv_suspend,
	.resume = oxu_drv_resume,
	.driver = {
		.name = "oxu210hp-hcd",
		.bus = &platform_bus_type
	}
};

module_platform_driver(oxu_driver);

MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_LICENSE("GPL");