vxge-traffic.c

  1. /******************************************************************************
  2. * This software may be used and distributed according to the terms of
  3. * the GNU General Public License (GPL), incorporated herein by reference.
  4. * Drivers based on or derived from this code fall under the GPL and must
  5. * retain the authorship, copyright and license notice. This file is not
  6. * a complete program and may only be used when the entire operating
  7. * system is licensed under the GPL.
  8. * See the file COPYING in this distribution for more information.
  9. *
  10. * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
  11. * Virtualized Server Adapter.
  12. * Copyright(c) 2002-2010 Exar Corp.
  13. ******************************************************************************/
  14. #include <linux/etherdevice.h>
  15. #include <linux/prefetch.h>
  16. #include "vxge-traffic.h"
  17. #include "vxge-config.h"
  18. #include "vxge-main.h"
  19. /*
  20. * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  21. * @vp: Virtual Path handle.
  22. *
  23. * Enable vpath interrupts. This function is to be executed last in the
  24. * vpath initialization sequence.
  25. *
  26. * See also: vxge_hw_vpath_intr_disable()
  27. */
  28. enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
  29. {
  30. u64 val64;
  31. struct __vxge_hw_virtualpath *vpath;
  32. struct vxge_hw_vpath_reg __iomem *vp_reg;
  33. enum vxge_hw_status status = VXGE_HW_OK;
  34. if (vp == NULL) {
  35. status = VXGE_HW_ERR_INVALID_HANDLE;
  36. goto exit;
  37. }
  38. vpath = vp->vpath;
  39. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  40. status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  41. goto exit;
  42. }
  43. vp_reg = vpath->vp_reg;
  44. writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
  45. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  46. &vp_reg->general_errors_reg);
  47. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  48. &vp_reg->pci_config_errors_reg);
  49. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  50. &vp_reg->mrpcim_to_vpath_alarm_reg);
  51. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  52. &vp_reg->srpcim_to_vpath_alarm_reg);
  53. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  54. &vp_reg->vpath_ppif_int_status);
  55. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  56. &vp_reg->srpcim_msg_to_vpath_reg);
  57. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  58. &vp_reg->vpath_pcipif_int_status);
  59. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  60. &vp_reg->prc_alarm_reg);
  61. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  62. &vp_reg->wrdma_alarm_status);
  63. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  64. &vp_reg->asic_ntwk_vp_err_reg);
  65. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  66. &vp_reg->xgmac_vp_int_status);
  67. val64 = readq(&vp_reg->vpath_general_int_status);
  68. /* Mask unwanted interrupts */
  69. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  70. &vp_reg->vpath_pcipif_int_mask);
  71. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  72. &vp_reg->srpcim_msg_to_vpath_mask);
  73. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  74. &vp_reg->srpcim_to_vpath_alarm_mask);
  75. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  76. &vp_reg->mrpcim_to_vpath_alarm_mask);
  77. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  78. &vp_reg->pci_config_errors_mask);
  79. /* Unmask the individual interrupts */
  80. writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
  81. VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
  82. VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
  83. VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
  84. &vp_reg->general_errors_mask);
  85. __vxge_hw_pio_mem_write32_upper(
  86. (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
  87. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
  88. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
  89. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
  90. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
  91. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
  92. &vp_reg->kdfcctl_errors_mask);
  93. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
  94. __vxge_hw_pio_mem_write32_upper(
  95. (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
  96. &vp_reg->prc_alarm_mask);
  97. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
  98. __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
  99. if (vpath->hldev->first_vp_id != vpath->vp_id)
  100. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  101. &vp_reg->asic_ntwk_vp_err_mask);
  102. else
  103. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
  104. VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
  105. VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
  106. &vp_reg->asic_ntwk_vp_err_mask);
  107. __vxge_hw_pio_mem_write32_upper(0,
  108. &vp_reg->vpath_general_int_mask);
  109. exit:
  110. return status;
  111. }
  112. /*
  113. * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
  114. * @vp: Virtual Path handle.
  115. *
  116. * Disable vpath interrupts. This function is to be executed as part of
  117. * the vpath shutdown sequence.
  118. *
  119. * See also: vxge_hw_vpath_intr_enable()
  120. */
  121. enum vxge_hw_status vxge_hw_vpath_intr_disable(
  122. struct __vxge_hw_vpath_handle *vp)
  123. {
  124. u64 val64;
  125. struct __vxge_hw_virtualpath *vpath;
  126. enum vxge_hw_status status = VXGE_HW_OK;
  127. struct vxge_hw_vpath_reg __iomem *vp_reg;
  128. if (vp == NULL) {
  129. status = VXGE_HW_ERR_INVALID_HANDLE;
  130. goto exit;
  131. }
  132. vpath = vp->vpath;
  133. if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
  134. status = VXGE_HW_ERR_VPATH_NOT_OPEN;
  135. goto exit;
  136. }
  137. vp_reg = vpath->vp_reg;
  138. __vxge_hw_pio_mem_write32_upper(
  139. (u32)VXGE_HW_INTR_MASK_ALL,
  140. &vp_reg->vpath_general_int_mask);
  141. val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
  142. writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
  143. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  144. &vp_reg->general_errors_mask);
  145. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  146. &vp_reg->pci_config_errors_mask);
  147. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  148. &vp_reg->mrpcim_to_vpath_alarm_mask);
  149. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  150. &vp_reg->srpcim_to_vpath_alarm_mask);
  151. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  152. &vp_reg->vpath_ppif_int_mask);
  153. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  154. &vp_reg->srpcim_msg_to_vpath_mask);
  155. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  156. &vp_reg->vpath_pcipif_int_mask);
  157. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  158. &vp_reg->wrdma_alarm_mask);
  159. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  160. &vp_reg->prc_alarm_mask);
  161. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  162. &vp_reg->xgmac_vp_int_mask);
  163. __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
  164. &vp_reg->asic_ntwk_vp_err_mask);
  165. exit:
  166. return status;
  167. }
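/*
 * Illustrative sketch (not part of the driver): the intended pairing of the
 * two helpers above. Interrupts are enabled per vpath as the last step of
 * bring-up and disabled again before the vpath is closed. The loop mirrors
 * what vxge_hw_device_intr_enable() below does for all deployed vpaths;
 * "hldev" and the vpath handles are assumed to be valid.
 *
 *	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
 *		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
 *			continue;
 *		vxge_hw_vpath_intr_enable(
 *			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
 *	}
 *	...
 *	vxge_hw_vpath_intr_disable(vp);		// per open vpath, on tear-down
 */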
  168. void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
  169. {
  170. struct vxge_hw_vpath_reg __iomem *vp_reg;
  171. struct vxge_hw_vp_config *config;
  172. u64 val64;
  173. if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
  174. return;
  175. vp_reg = fifo->vp_reg;
  176. config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
  177. if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
  178. config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
  179. val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
  180. val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
  181. fifo->tim_tti_cfg1_saved = val64;
  182. writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
  183. }
  184. }
  185. void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
  186. {
  187. u64 val64 = ring->tim_rti_cfg1_saved;
  188. val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
  189. ring->tim_rti_cfg1_saved = val64;
  190. writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
  191. }
  192. void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
  193. {
  194. u64 val64 = fifo->tim_tti_cfg3_saved;
  195. u64 timer = (fifo->rtimer * 1000) / 272;
  196. val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
  197. if (timer)
  198. val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
  199. VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
  200. writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
  201. /* tti_cfg3_saved is not updated again because it is
  202. * initialized at one place only - init time.
  203. */
  204. }
  205. void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
  206. {
  207. u64 val64 = ring->tim_rti_cfg3_saved;
  208. u64 timer = (ring->rtimer * 1000) / 272;
  209. val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
  210. if (timer)
  211. val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
  212. VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
  213. writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
  214. /* rti_cfg3_saved is not updated again because it is
  215. * initialized at one place only - init time.
  216. */
  217. }
  218. /**
  219. * vxge_hw_channel_msix_mask - Mask MSIX Vector.
  220. * @channel: Channel for rx or tx handle
  221. * @msix_id: MSIX ID
  222. *
  223. * The function masks the msix interrupt for the given msix_id
  224. *
  225. * Returns: 0
  226. */
  227. void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
  228. {
  229. __vxge_hw_pio_mem_write32_upper(
  230. (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
  231. &channel->common_reg->set_msix_mask_vect[msix_id%4]);
  232. }
  233. /**
  234. * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
  235. * @channel: Channel for rx or tx handle
  236. * @msix_id: MSIX ID
  237. *
  238. * The function unmasks the msix interrupt for the given msix_id
  239. *
  240. * Returns: 0
  241. */
  242. void
  243. vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
  244. {
  245. __vxge_hw_pio_mem_write32_upper(
  246. (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
  247. &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
  248. }
  249. /**
  250. * vxge_hw_channel_msix_clear - Unmask the MSIX Vector.
  251. * @channel: Channel for rx or tx handle
  252. * @msix_id: MSIX ID
  253. *
  254. * The function unmasks the msix interrupt for the given msix_id
  255. * if configured in MSIX oneshot mode
  256. *
  257. * Returns: 0
  258. */
  259. void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
  260. {
  261. __vxge_hw_pio_mem_write32_upper(
  262. (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
  263. &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
  264. }
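/*
 * Note on the msix_id encoding used by the three helpers above: the mask,
 * unmask and one-shot-clear vectors are 4-deep register arrays, so
 * "msix_id % 4" selects the register and vxge_mBIT(msix_id >> 2) selects
 * the bit within it. A minimal usage sketch (illustrative only; "channel",
 * "msix_id" and "one_shot" are assumed to come from the caller's MSI-X
 * setup):
 *
 *	vxge_hw_channel_msix_mask(channel, msix_id);
 *	// ... service the ring or fifo ...
 *	if (one_shot)
 *		vxge_hw_channel_msix_clear(channel, msix_id);
 *	else
 *		vxge_hw_channel_msix_unmask(channel, msix_id);
 */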
  265. /**
  266. * vxge_hw_device_set_intr_type - Updates the configuration
  267. * with new interrupt type.
  268. * @hldev: HW device handle.
  269. * @intr_mode: New interrupt type
  270. */
  271. u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
  272. {
  273. if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
  274. (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
  275. (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
  276. (intr_mode != VXGE_HW_INTR_MODE_DEF))
  277. intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
  278. hldev->config.intr_mode = intr_mode;
  279. return intr_mode;
  280. }
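/*
 * Illustrative sketch (not driver code): the helper above only validates
 * the requested mode and falls back to VXGE_HW_INTR_MODE_IRQLINE for
 * unknown values; it does not probe MSI-X availability. A caller whose
 * PCI-level MSI-X setup fails is expected to switch the configuration back
 * itself ("msix_setup_failed" is a hypothetical status from the caller's
 * own pci_enable_msix_range() attempt):
 *
 *	if (msix_setup_failed)
 *		vxge_hw_device_set_intr_type(hldev,
 *					     VXGE_HW_INTR_MODE_IRQLINE);
 */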
  281. /**
  282. * vxge_hw_device_intr_enable - Enable interrupts.
  283. * @hldev: HW device handle.
  286. *
  287. * Enable Titan interrupts. This function is to be executed last in the
  288. * Titan initialization sequence.
  289. *
  290. * See also: vxge_hw_device_intr_disable()
  291. */
  292. void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
  293. {
  294. u32 i;
  295. u64 val64;
  296. u32 val32;
  297. vxge_hw_device_mask_all(hldev);
  298. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  299. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  300. continue;
  301. vxge_hw_vpath_intr_enable(
  302. VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
  303. }
  304. if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
  305. val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
  306. hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
  307. if (val64 != 0) {
  308. writeq(val64, &hldev->common_reg->tim_int_status0);
  309. writeq(~val64, &hldev->common_reg->tim_int_mask0);
  310. }
  311. val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
  312. hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
  313. if (val32 != 0) {
  314. __vxge_hw_pio_mem_write32_upper(val32,
  315. &hldev->common_reg->tim_int_status1);
  316. __vxge_hw_pio_mem_write32_upper(~val32,
  317. &hldev->common_reg->tim_int_mask1);
  318. }
  319. }
  320. val64 = readq(&hldev->common_reg->titan_general_int_status);
  321. vxge_hw_device_unmask_all(hldev);
  322. }
  323. /**
  324. * vxge_hw_device_intr_disable - Disable Titan interrupts.
  325. * @hldev: HW device handle.
  328. *
  329. * Disable Titan interrupts.
  330. *
  331. * See also: vxge_hw_device_intr_enable()
  332. */
  333. void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
  334. {
  335. u32 i;
  336. vxge_hw_device_mask_all(hldev);
  337. /* mask all the tim interrupts */
  338. writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
  339. __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
  340. &hldev->common_reg->tim_int_mask1);
  341. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  342. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  343. continue;
  344. vxge_hw_vpath_intr_disable(
  345. VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
  346. }
  347. }
  348. /**
  349. * vxge_hw_device_mask_all - Mask all device interrupts.
  350. * @hldev: HW device handle.
  351. *
  352. * Mask all device interrupts.
  353. *
  354. * See also: vxge_hw_device_unmask_all()
  355. */
  356. void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
  357. {
  358. u64 val64;
  359. val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
  360. VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
  361. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
  362. &hldev->common_reg->titan_mask_all_int);
  363. }
  364. /**
  365. * vxge_hw_device_unmask_all - Unmask all device interrupts.
  366. * @hldev: HW device handle.
  367. *
  368. * Unmask all device interrupts.
  369. *
  370. * See also: vxge_hw_device_mask_all()
  371. */
  372. void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
  373. {
  374. u64 val64 = 0;
  375. if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
  376. val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
  377. __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
  378. &hldev->common_reg->titan_mask_all_int);
  379. }
  380. /**
  381. * vxge_hw_device_flush_io - Flush io writes.
  382. * @hldev: HW device handle.
  383. *
  384. * The function performs a read operation to flush io writes.
  385. *
  386. * Returns: void
  387. */
  388. void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
  389. {
  390. u32 val32;
  391. val32 = readl(&hldev->common_reg->titan_general_int_status);
  392. }
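/*
 * Usage sketch (illustrative): the read-back above is the usual way to
 * make sure previously posted MMIO writes have reached the adapter, e.g.
 * after re-programming interrupt masks and before new interrupts are
 * expected:
 *
 *	vxge_hw_device_unmask_all(hldev);
 *	vxge_hw_device_flush_io(hldev);		// readl() flushes posted writes
 */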
  393. /**
  394. * __vxge_hw_device_handle_error - Handle error
  395. * @hldev: HW device
  396. * @vp_id: Vpath Id
  397. * @type: Error type. Please see enum vxge_hw_event{}
  398. *
  399. * Handle error.
  400. */
  401. static enum vxge_hw_status
  402. __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
  403. enum vxge_hw_event type)
  404. {
  405. switch (type) {
  406. case VXGE_HW_EVENT_UNKNOWN:
  407. break;
  408. case VXGE_HW_EVENT_RESET_START:
  409. case VXGE_HW_EVENT_RESET_COMPLETE:
  410. case VXGE_HW_EVENT_LINK_DOWN:
  411. case VXGE_HW_EVENT_LINK_UP:
  412. goto out;
  413. case VXGE_HW_EVENT_ALARM_CLEARED:
  414. goto out;
  415. case VXGE_HW_EVENT_ECCERR:
  416. case VXGE_HW_EVENT_MRPCIM_ECCERR:
  417. goto out;
  418. case VXGE_HW_EVENT_FIFO_ERR:
  419. case VXGE_HW_EVENT_VPATH_ERR:
  420. case VXGE_HW_EVENT_CRITICAL_ERR:
  421. case VXGE_HW_EVENT_SERR:
  422. break;
  423. case VXGE_HW_EVENT_SRPCIM_SERR:
  424. case VXGE_HW_EVENT_MRPCIM_SERR:
  425. goto out;
  426. case VXGE_HW_EVENT_SLOT_FREEZE:
  427. break;
  428. default:
  429. vxge_assert(0);
  430. goto out;
  431. }
  432. /* notify driver */
  433. if (hldev->uld_callbacks->crit_err)
  434. hldev->uld_callbacks->crit_err(hldev,
  435. type, vp_id);
  436. out:
  437. return VXGE_HW_OK;
  438. }
  439. /*
  440. * __vxge_hw_device_handle_link_down_ind
  441. * @hldev: HW device handle.
  442. *
  443. * Link down indication handler. The function is invoked by HW when
  444. * Titan indicates that the link is down.
  445. */
  446. static enum vxge_hw_status
  447. __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
  448. {
  449. /*
  450. * If the link state is already down, return.
  451. */
  452. if (hldev->link_state == VXGE_HW_LINK_DOWN)
  453. goto exit;
  454. hldev->link_state = VXGE_HW_LINK_DOWN;
  455. /* notify driver */
  456. if (hldev->uld_callbacks->link_down)
  457. hldev->uld_callbacks->link_down(hldev);
  458. exit:
  459. return VXGE_HW_OK;
  460. }
  461. /*
  462. * __vxge_hw_device_handle_link_up_ind
  463. * @hldev: HW device handle.
  464. *
  465. * Link up indication handler. The function is invoked by HW when
  466. * Titan indicates that the link is up for a programmable amount of time.
  467. */
  468. static enum vxge_hw_status
  469. __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
  470. {
  471. /*
  472. * If the link state is already up, return.
  473. */
  474. if (hldev->link_state == VXGE_HW_LINK_UP)
  475. goto exit;
  476. hldev->link_state = VXGE_HW_LINK_UP;
  477. /* notify driver */
  478. if (hldev->uld_callbacks->link_up)
  479. hldev->uld_callbacks->link_up(hldev);
  480. exit:
  481. return VXGE_HW_OK;
  482. }
  483. /*
  484. * __vxge_hw_vpath_alarm_process - Process Alarms.
  485. * @vpath: Virtual Path.
  486. * @skip_alarms: Do not clear the alarms
  487. *
  488. * Process vpath alarms.
  489. *
  490. */
  491. static enum vxge_hw_status
  492. __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
  493. u32 skip_alarms)
  494. {
  495. u64 val64;
  496. u64 alarm_status;
  497. u64 pic_status;
  498. struct __vxge_hw_device *hldev = NULL;
  499. enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
  500. u64 mask64;
  501. struct vxge_hw_vpath_stats_sw_info *sw_stats;
  502. struct vxge_hw_vpath_reg __iomem *vp_reg;
  503. if (vpath == NULL) {
  504. alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
  505. alarm_event);
  506. goto out2;
  507. }
  508. hldev = vpath->hldev;
  509. vp_reg = vpath->vp_reg;
  510. alarm_status = readq(&vp_reg->vpath_general_int_status);
  511. if (alarm_status == VXGE_HW_ALL_FOXES) {
  512. alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
  513. alarm_event);
  514. goto out;
  515. }
  516. sw_stats = vpath->sw_stats;
  517. if (alarm_status & ~(
  518. VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
  519. VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
  520. VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
  521. VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
  522. sw_stats->error_stats.unknown_alarms++;
  523. alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
  524. alarm_event);
  525. goto out;
  526. }
  527. if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
  528. val64 = readq(&vp_reg->xgmac_vp_int_status);
  529. if (val64 &
  530. VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
  531. val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
  532. if (((val64 &
  533. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
  534. (!(val64 &
  535. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
  536. ((val64 &
  537. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
  538. (!(val64 &
  539. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
  540. ))) {
  541. sw_stats->error_stats.network_sustained_fault++;
  542. writeq(
  543. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
  544. &vp_reg->asic_ntwk_vp_err_mask);
  545. __vxge_hw_device_handle_link_down_ind(hldev);
  546. alarm_event = VXGE_HW_SET_LEVEL(
  547. VXGE_HW_EVENT_LINK_DOWN, alarm_event);
  548. }
  549. if (((val64 &
  550. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
  551. (!(val64 &
  552. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
  553. ((val64 &
  554. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
  555. (!(val64 &
  556. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
  557. ))) {
  558. sw_stats->error_stats.network_sustained_ok++;
  559. writeq(
  560. VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
  561. &vp_reg->asic_ntwk_vp_err_mask);
  562. __vxge_hw_device_handle_link_up_ind(hldev);
  563. alarm_event = VXGE_HW_SET_LEVEL(
  564. VXGE_HW_EVENT_LINK_UP, alarm_event);
  565. }
  566. writeq(VXGE_HW_INTR_MASK_ALL,
  567. &vp_reg->asic_ntwk_vp_err_reg);
  568. alarm_event = VXGE_HW_SET_LEVEL(
  569. VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
  570. if (skip_alarms)
  571. return VXGE_HW_OK;
  572. }
  573. }
  574. if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
  575. pic_status = readq(&vp_reg->vpath_ppif_int_status);
  576. if (pic_status &
  577. VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
  578. val64 = readq(&vp_reg->general_errors_reg);
  579. mask64 = readq(&vp_reg->general_errors_mask);
  580. if ((val64 &
  581. VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
  582. ~mask64) {
  583. sw_stats->error_stats.ini_serr_det++;
  584. alarm_event = VXGE_HW_SET_LEVEL(
  585. VXGE_HW_EVENT_SERR, alarm_event);
  586. }
  587. if ((val64 &
  588. VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
  589. ~mask64) {
  590. sw_stats->error_stats.dblgen_fifo0_overflow++;
  591. alarm_event = VXGE_HW_SET_LEVEL(
  592. VXGE_HW_EVENT_FIFO_ERR, alarm_event);
  593. }
  594. if ((val64 &
  595. VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
  596. ~mask64)
  597. sw_stats->error_stats.statsb_pif_chain_error++;
  598. if ((val64 &
  599. VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
  600. ~mask64)
  601. sw_stats->error_stats.statsb_drop_timeout++;
  602. if ((val64 &
  603. VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
  604. ~mask64)
  605. sw_stats->error_stats.target_illegal_access++;
  606. if (!skip_alarms) {
  607. writeq(VXGE_HW_INTR_MASK_ALL,
  608. &vp_reg->general_errors_reg);
  609. alarm_event = VXGE_HW_SET_LEVEL(
  610. VXGE_HW_EVENT_ALARM_CLEARED,
  611. alarm_event);
  612. }
  613. }
  614. if (pic_status &
  615. VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
  616. val64 = readq(&vp_reg->kdfcctl_errors_reg);
  617. mask64 = readq(&vp_reg->kdfcctl_errors_mask);
  618. if ((val64 &
  619. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
  620. ~mask64) {
  621. sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
  622. alarm_event = VXGE_HW_SET_LEVEL(
  623. VXGE_HW_EVENT_FIFO_ERR,
  624. alarm_event);
  625. }
  626. if ((val64 &
  627. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
  628. ~mask64) {
  629. sw_stats->error_stats.kdfcctl_fifo0_poison++;
  630. alarm_event = VXGE_HW_SET_LEVEL(
  631. VXGE_HW_EVENT_FIFO_ERR,
  632. alarm_event);
  633. }
  634. if ((val64 &
  635. VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
  636. ~mask64) {
  637. sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
  638. alarm_event = VXGE_HW_SET_LEVEL(
  639. VXGE_HW_EVENT_FIFO_ERR,
  640. alarm_event);
  641. }
  642. if (!skip_alarms) {
  643. writeq(VXGE_HW_INTR_MASK_ALL,
  644. &vp_reg->kdfcctl_errors_reg);
  645. alarm_event = VXGE_HW_SET_LEVEL(
  646. VXGE_HW_EVENT_ALARM_CLEARED,
  647. alarm_event);
  648. }
  649. }
  650. }
  651. if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
  652. val64 = readq(&vp_reg->wrdma_alarm_status);
  653. if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
  654. val64 = readq(&vp_reg->prc_alarm_reg);
  655. mask64 = readq(&vp_reg->prc_alarm_mask);
  656. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
  657. ~mask64)
  658. sw_stats->error_stats.prc_ring_bumps++;
  659. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
  660. ~mask64) {
  661. sw_stats->error_stats.prc_rxdcm_sc_err++;
  662. alarm_event = VXGE_HW_SET_LEVEL(
  663. VXGE_HW_EVENT_VPATH_ERR,
  664. alarm_event);
  665. }
  666. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
  667. & ~mask64) {
  668. sw_stats->error_stats.prc_rxdcm_sc_abort++;
  669. alarm_event = VXGE_HW_SET_LEVEL(
  670. VXGE_HW_EVENT_VPATH_ERR,
  671. alarm_event);
  672. }
  673. if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
  674. & ~mask64) {
  675. sw_stats->error_stats.prc_quanta_size_err++;
  676. alarm_event = VXGE_HW_SET_LEVEL(
  677. VXGE_HW_EVENT_VPATH_ERR,
  678. alarm_event);
  679. }
  680. if (!skip_alarms) {
  681. writeq(VXGE_HW_INTR_MASK_ALL,
  682. &vp_reg->prc_alarm_reg);
  683. alarm_event = VXGE_HW_SET_LEVEL(
  684. VXGE_HW_EVENT_ALARM_CLEARED,
  685. alarm_event);
  686. }
  687. }
  688. }
  689. out:
  690. hldev->stats.sw_dev_err_stats.vpath_alarms++;
  691. out2:
  692. if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
  693. (alarm_event == VXGE_HW_EVENT_UNKNOWN))
  694. return VXGE_HW_OK;
  695. __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
  696. if (alarm_event == VXGE_HW_EVENT_SERR)
  697. return VXGE_HW_ERR_CRITICAL;
  698. return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
  699. VXGE_HW_ERR_SLOT_FREEZE :
  700. (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
  701. VXGE_HW_ERR_VPATH;
  702. }
  703. /**
  704. * vxge_hw_device_begin_irq - Begin IRQ processing.
  705. * @hldev: HW device handle.
  706. * @skip_alarms: Do not clear the alarms
  707. * @reason: "Reason" for the interrupt, the value of Titan's
  708. * general_int_status register.
  709. *
  710. * The function performs two actions: it first checks whether the interrupt was
  711. * raised by this device (relevant for shared IRQ lines), then it masks the device interrupts.
  712. *
  713. * Note:
  714. * vxge_hw_device_begin_irq() does not flush MMIO writes through the
  715. * bridge. Therefore, two back-to-back interrupts are potentially possible.
  716. *
  717. * Returns: 0, if the interrupt is not "ours" (note that in this case the
  718. * device remains enabled).
  719. * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
  720. * status.
  721. */
  722. enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
  723. u32 skip_alarms, u64 *reason)
  724. {
  725. u32 i;
  726. u64 val64;
  727. u64 adapter_status;
  728. u64 vpath_mask;
  729. enum vxge_hw_status ret = VXGE_HW_OK;
  730. val64 = readq(&hldev->common_reg->titan_general_int_status);
  731. if (unlikely(!val64)) {
  732. /* not Titan interrupt */
  733. *reason = 0;
  734. ret = VXGE_HW_ERR_WRONG_IRQ;
  735. goto exit;
  736. }
  737. if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
  738. adapter_status = readq(&hldev->common_reg->adapter_status);
  739. if (adapter_status == VXGE_HW_ALL_FOXES) {
  740. __vxge_hw_device_handle_error(hldev,
  741. NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
  742. *reason = 0;
  743. ret = VXGE_HW_ERR_SLOT_FREEZE;
  744. goto exit;
  745. }
  746. }
  747. hldev->stats.sw_dev_info_stats.total_intr_cnt++;
  748. *reason = val64;
  749. vpath_mask = hldev->vpaths_deployed >>
  750. (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
  751. if (val64 &
  752. VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
  753. hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
  754. return VXGE_HW_OK;
  755. }
  756. hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
  757. if (unlikely(val64 &
  758. VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
  759. enum vxge_hw_status error_level = VXGE_HW_OK;
  760. hldev->stats.sw_dev_err_stats.vpath_alarms++;
  761. for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
  762. if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
  763. continue;
  764. ret = __vxge_hw_vpath_alarm_process(
  765. &hldev->virtual_paths[i], skip_alarms);
  766. error_level = VXGE_HW_SET_LEVEL(ret, error_level);
  767. if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
  768. (ret == VXGE_HW_ERR_SLOT_FREEZE)))
  769. break;
  770. }
  771. ret = error_level;
  772. }
  773. exit:
  774. return ret;
  775. }
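/*
 * Illustrative INTA service routine built from the calls above (a sketch
 * only, loosely modelled on vxge-main.c; NAPI scheduling and the device
 * private data are assumed and simplified):
 *
 *	static irqreturn_t vxge_isr_sketch(int irq, void *dev_id)
 *	{
 *		struct __vxge_hw_device *hldev = dev_id;
 *		u64 reason;
 *		enum vxge_hw_status status;
 *
 *		status = vxge_hw_device_begin_irq(hldev, 0, &reason);
 *		if (status == VXGE_HW_ERR_WRONG_IRQ || !reason)
 *			return IRQ_NONE;	// shared line, not ours
 *
 *		vxge_hw_device_mask_all(hldev);
 *		// schedule NAPI here; the poll routine later calls
 *		// vxge_hw_device_clear_tx_rx(), then unmask_all() and
 *		// vxge_hw_device_flush_io() when it is done
 *		return IRQ_HANDLED;
 *	}
 */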
  776. /**
  777. * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
  778. * condition that has caused the Tx and Rx interrupt.
  779. * @hldev: HW device.
  780. *
  781. * Acknowledge (that is, clear) the condition that has caused
  782. * the Tx and Rx interrupt.
  783. * See also: vxge_hw_device_begin_irq(),
  784. * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
  785. */
  786. void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
  787. {
  788. if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
  789. (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
  790. writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
  791. hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
  792. &hldev->common_reg->tim_int_status0);
  793. }
  794. if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
  795. (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
  796. __vxge_hw_pio_mem_write32_upper(
  797. (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
  798. hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
  799. &hldev->common_reg->tim_int_status1);
  800. }
  801. }
  802. /*
  803. * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
  804. * @channel: Channel
  805. * @dtrh: Buffer to return the DTR pointer
  806. *
  807. * Allocates a dtr from the reserve array. If the reserve array is empty,
  808. * it swaps the reserve and free arrays.
  809. *
  810. */
  811. static enum vxge_hw_status
  812. vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
  813. {
  814. if (channel->reserve_ptr - channel->reserve_top > 0) {
  815. _alloc_after_swap:
  816. *dtrh = channel->reserve_arr[--channel->reserve_ptr];
  817. return VXGE_HW_OK;
  818. }
  819. /* switch between empty and full arrays */
  820. /* The idea behind this design is that keeping the free and reserve
  821. * arrays separate also separates the irq and non-irq parts, i.e. no
  822. * additional locking is needed when a resource is freed. */
  823. if (channel->length - channel->free_ptr > 0) {
  824. swap(channel->reserve_arr, channel->free_arr);
  825. channel->reserve_ptr = channel->length;
  826. channel->reserve_top = channel->free_ptr;
  827. channel->free_ptr = channel->length;
  828. channel->stats->reserve_free_swaps_cnt++;
  829. goto _alloc_after_swap;
  830. }
  831. channel->stats->full_cnt++;
  832. *dtrh = NULL;
  833. return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
  834. }
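/*
 * Worked example of the swap above (illustrative numbers): with
 * length == 4 and all four DTRs freed back via vxge_hw_channel_dtr_free(),
 * the counters evolve as
 *
 *	before swap:	reserve_ptr = 0, reserve_top = 0, free_ptr = 0
 *	after swap:	reserve_ptr = 4, reserve_top = 0, free_ptr = 4
 *	next allocs:	reserve_arr[3], reserve_arr[2], ... down to reserve_top
 *
 * so the allocation path and the free path keep operating on different
 * arrays and need no extra lock, as the comment above notes.
 */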
  835. /*
  836. * vxge_hw_channel_dtr_post - Post a dtr to the channel
  837. * @channelh: Channel
  838. * @dtrh: DTR pointer
  839. *
  840. * Posts a dtr to work array.
  841. *
  842. */
  843. static void
  844. vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
  845. {
  846. vxge_assert(channel->work_arr[channel->post_index] == NULL);
  847. channel->work_arr[channel->post_index++] = dtrh;
  848. /* wrap-around */
  849. if (channel->post_index == channel->length)
  850. channel->post_index = 0;
  851. }
  852. /*
  853. * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
  854. * @channel: Channel
  855. * @dtr: Buffer to return the next completed DTR pointer
  856. *
  857. * Returns the next completed dtr without removing it from the work array
  858. *
  859. */
  860. void
  861. vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
  862. {
  863. vxge_assert(channel->compl_index < channel->length);
  864. *dtrh = channel->work_arr[channel->compl_index];
  865. prefetch(*dtrh);
  866. }
  867. /*
  868. * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
  869. * @channel: Channel handle
  870. *
  871. * Removes the next completed dtr from work array
  872. *
  873. */
  874. void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
  875. {
  876. channel->work_arr[channel->compl_index] = NULL;
  877. /* wrap-around */
  878. if (++channel->compl_index == channel->length)
  879. channel->compl_index = 0;
  880. channel->stats->total_compl_cnt++;
  881. }
  882. /*
  883. * vxge_hw_channel_dtr_free - Frees a dtr
  884. * @channel: Channel handle
  885. * @dtr: DTR pointer
  886. *
  887. * Returns the dtr to free array
  888. *
  889. */
  890. void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
  891. {
  892. channel->free_arr[--channel->free_ptr] = dtrh;
  893. }
  894. /*
  895. * vxge_hw_channel_dtr_count
  896. * @channel: Channel handle. Obtained via vxge_hw_channel_open().
  897. *
  898. * Retrieve the number of DTRs available. This function cannot be called
  899. * from the data path; ring_initial_replenishi() is the only user.
  900. */
  901. int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
  902. {
  903. return (channel->reserve_ptr - channel->reserve_top) +
  904. (channel->length - channel->free_ptr);
  905. }
  906. /**
  907. * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
  908. * @ring: Handle to the ring object used for receive
  909. * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
  910. * with a valid handle.
  911. *
  912. * Reserve an Rx descriptor for subsequent filling-in by the driver
  913. * and posting on the corresponding channel
  914. * via vxge_hw_ring_rxd_post().
  915. *
  916. * Returns: VXGE_HW_OK - success.
  917. * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
  918. *
  919. */
  920. enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
  921. void **rxdh)
  922. {
  923. enum vxge_hw_status status;
  924. struct __vxge_hw_channel *channel;
  925. channel = &ring->channel;
  926. status = vxge_hw_channel_dtr_alloc(channel, rxdh);
  927. if (status == VXGE_HW_OK) {
  928. struct vxge_hw_ring_rxd_1 *rxdp =
  929. (struct vxge_hw_ring_rxd_1 *)*rxdh;
  930. rxdp->control_0 = rxdp->control_1 = 0;
  931. }
  932. return status;
  933. }
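/*
 * Replenish sketch (illustrative; buffer allocation and DMA mapping are
 * the caller's responsibility, and vxge_hw_ring_rxd_1b_set() is assumed to
 * be the one-buffer RxD helper from vxge-traffic.h):
 *
 *	void *rxdh;
 *
 *	while (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		// allocate an skb, dma_map_single() it, then describe the
 *		// buffer in the RxD and hand it back to the adapter
 *		vxge_hw_ring_rxd_1b_set(rxdh, dma_addr, buf_size);
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 */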
  934. /**
  935. * vxge_hw_ring_rxd_free - Free descriptor.
  936. * @ring: Handle to the ring object used for receive
  937. * @rxdh: Descriptor handle.
  938. *
  939. * Free the reserved descriptor. This operation is "symmetrical" to
  940. * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
  941. * lifecycle.
  942. *
  943. * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
  944. * be:
  945. *
  946. * - reserved (vxge_hw_ring_rxd_reserve);
  947. *
  948. * - posted (vxge_hw_ring_rxd_post);
  949. *
  950. * - completed (vxge_hw_ring_rxd_next_completed);
  951. *
  952. * - and recycled again (vxge_hw_ring_rxd_free).
  953. *
  954. * For alternative state transitions and more details please refer to
  955. * the design doc.
  956. *
  957. */
  958. void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
  959. {
  960. struct __vxge_hw_channel *channel;
  961. channel = &ring->channel;
  962. vxge_hw_channel_dtr_free(channel, rxdh);
  963. }
  964. /**
  965. * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
  966. * @ring: Handle to the ring object used for receive
  967. * @rxdh: Descriptor handle.
  968. *
  969. * This routine prepares a rxd and posts
  970. */
  971. void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
  972. {
  973. struct __vxge_hw_channel *channel;
  974. channel = &ring->channel;
  975. vxge_hw_channel_dtr_post(channel, rxdh);
  976. }
  977. /**
  978. * vxge_hw_ring_rxd_post_post - Process rxd after post.
  979. * @ring: Handle to the ring object used for receive
  980. * @rxdh: Descriptor handle.
  981. *
  982. * Processes rxd after post
  983. */
  984. void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
  985. {
  986. struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
  987. rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
  988. if (ring->stats->common_stats.usage_cnt > 0)
  989. ring->stats->common_stats.usage_cnt--;
  990. }
  991. /**
  992. * vxge_hw_ring_rxd_post - Post descriptor on the ring.
  993. * @ring: Handle to the ring object used for receive
  994. * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
  995. *
  996. * Post descriptor on the ring.
  997. * Prior to posting the descriptor should be filled in accordance with
  998. * Host/Titan interface specification for a given service (LL, etc.).
  999. *
  1000. */
  1001. void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
  1002. {
  1003. struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
  1004. struct __vxge_hw_channel *channel;
  1005. channel = &ring->channel;
  1006. wmb();
  1007. rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
  1008. vxge_hw_channel_dtr_post(channel, rxdh);
  1009. if (ring->stats->common_stats.usage_cnt > 0)
  1010. ring->stats->common_stats.usage_cnt--;
  1011. }
  1012. /**
  1013. * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
  1014. * @ring: Handle to the ring object used for receive
  1015. * @rxdh: Descriptor handle.
  1016. *
  1017. * Processes rxd after post with memory barrier.
  1018. */
  1019. void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
  1020. {
  1021. wmb();
  1022. vxge_hw_ring_rxd_post_post(ring, rxdh);
  1023. }
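/*
 * Note (illustrative): the posting helpers above differ only in where the
 * ownership bit is set and whether a write barrier is issued.
 * vxge_hw_ring_rxd_post() does everything in one call, while the
 * pre_post/post_post(_wmb) pair lets the driver queue the descriptor first
 * and transfer ownership to the adapter later:
 *
 *	vxge_hw_ring_rxd_pre_post(ring, rxdh);	// queue into the work array
 *	// ... finish filling in the RxD ...
 *	vxge_hw_ring_rxd_post_post_wmb(ring, rxdh);	// wmb(), then set OWN
 */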
  1024. /**
  1025. * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
  1026. * @ring: Handle to the ring object used for receive
  1027. * @rxdh: Descriptor handle. Returned by HW.
  1028. * @t_code: Transfer code, as per Titan User Guide,
  1029. * Receive Descriptor Format. Returned by HW.
  1030. *
  1031. * Retrieve the _next_ completed descriptor.
  1032. * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
  1033. * driver of new completed descriptors. After that
  1034. * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
  1035. * of the completions (the very first completion is passed by HW via
  1036. * vxge_hw_ring_callback_f).
  1037. *
  1038. * Implementation-wise, the driver is free to call
  1039. * vxge_hw_ring_rxd_next_completed either immediately from inside the
  1040. * ring callback, or in a deferred fashion and separate (from HW)
  1041. * context.
  1042. *
  1043. * Non-zero @t_code means failure to fill-in receive buffer(s)
  1044. * of the descriptor.
  1045. * For instance, parity error detected during the data transfer.
  1046. * In this case Titan will complete the descriptor and indicate
  1047. * for the host that the received data is not to be used.
  1048. * For details please refer to Titan User Guide.
  1049. *
  1050. * Returns: VXGE_HW_OK - success.
  1051. * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
  1052. * are currently available for processing.
  1053. *
  1054. * See also: vxge_hw_ring_callback_f{},
  1055. * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}.
  1056. */
  1057. enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
  1058. struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
  1059. {
  1060. struct __vxge_hw_channel *channel;
  1061. struct vxge_hw_ring_rxd_1 *rxdp;
  1062. enum vxge_hw_status status = VXGE_HW_OK;
  1063. u64 control_0, own;
  1064. channel = &ring->channel;
  1065. vxge_hw_channel_dtr_try_complete(channel, rxdh);
  1066. rxdp = *rxdh;
  1067. if (rxdp == NULL) {
  1068. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  1069. goto exit;
  1070. }
  1071. control_0 = rxdp->control_0;
  1072. own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
  1073. *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
  1074. /* check whether it is not the end */
  1075. if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
  1076. vxge_assert((rxdp)->host_control !=
  1077. 0);
  1078. ++ring->cmpl_cnt;
  1079. vxge_hw_channel_dtr_complete(channel);
  1080. vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
  1081. ring->stats->common_stats.usage_cnt++;
  1082. if (ring->stats->common_stats.usage_max <
  1083. ring->stats->common_stats.usage_cnt)
  1084. ring->stats->common_stats.usage_max =
  1085. ring->stats->common_stats.usage_cnt;
  1086. status = VXGE_HW_OK;
  1087. goto exit;
  1088. }
  1089. /* reset it. since we don't want to return
  1090. * garbage to the driver */
  1091. *rxdh = NULL;
  1092. status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
  1093. exit:
  1094. return status;
  1095. }
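/*
 * Rx completion loop sketch (illustrative; unmapping the buffer, passing
 * it up the stack and re-filling the descriptor are the caller's job):
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (vxge_hw_ring_handle_tcode(ring, rxdh, t_code) !=
 *		    VXGE_HW_OK) {
 *			vxge_hw_ring_rxd_free(ring, rxdh);
 *			continue;
 *		}
 *		// hand the received buffer to the stack, then refill and
 *		// vxge_hw_ring_rxd_post() the descriptor again
 *	}
 */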
  1096. /**
  1097. * vxge_hw_ring_handle_tcode - Handle transfer code.
  1098. * @ring: Handle to the ring object used for receive
  1099. * @rxdh: Descriptor handle.
  1100. * @t_code: One of the enumerated (and documented in the Titan user guide)
  1101. * "transfer codes".
  1102. *
  1103. * Handle descriptor's transfer code. The latter comes with each completed
  1104. * descriptor.
  1105. *
  1106. * Returns: one of the enum vxge_hw_status{} enumerated types.
  1107. * VXGE_HW_OK - for success.
  1108. * VXGE_HW_ERR_CRITICAL - when encounters critical error.
  1109. */
  1110. enum vxge_hw_status vxge_hw_ring_handle_tcode(
  1111. struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
  1112. {
  1113. enum vxge_hw_status status = VXGE_HW_OK;
  1114. /* If the t_code is not supported and the
  1115. * t_code is other than 0x5 (unparseable packet,
  1116. * e.g. one with an unknown IPv6 header), drop it.
  1117. */
  1118. if (t_code == VXGE_HW_RING_T_CODE_OK ||
  1119. t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
  1120. status = VXGE_HW_OK;
  1121. goto exit;
  1122. }
  1123. if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
  1124. status = VXGE_HW_ERR_INVALID_TCODE;
  1125. goto exit;
  1126. }
  1127. ring->stats->rxd_t_code_err_cnt[t_code]++;
  1128. exit:
  1129. return status;
  1130. }
  1131. /**
  1132. * __vxge_hw_non_offload_db_post - Post non offload doorbell
  1133. *
  1134. * @fifo: fifohandle
  1135. * @txdl_ptr: The starting location of the TxDL in host memory
  1136. * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
  1137. * @no_snoop: No snoop flags
  1138. *
  1139. * This function posts a non-offload doorbell to doorbell FIFO
  1140. *
  1141. */
  1142. static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
  1143. u64 txdl_ptr, u32 num_txds, u32 no_snoop)
  1144. {
  1145. writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
  1146. VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
  1147. VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
  1148. &fifo->nofl_db->control_0);
  1149. mmiowb();
  1150. writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
  1151. mmiowb();
  1152. }
  1153. /**
  1154. * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
  1155. * the fifo
  1156. * @fifoh: Handle to the fifo object used for non offload send
  1157. */
  1158. u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
  1159. {
  1160. return vxge_hw_channel_dtr_count(&fifoh->channel);
  1161. }
  1162. /**
  1163. * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
  1164. * @fifoh: Handle to the fifo object used for non offload send
  1165. * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
  1166. * with a valid handle.
  1167. * @txdl_priv: Buffer to return the pointer to per txdl space
  1168. *
  1169. * Reserve a single TxDL (that is, fifo descriptor)
  1170. * for the subsequent filling-in by the driver
  1171. * and posting on the corresponding fifo channel
  1172. * via vxge_hw_fifo_txdl_post().
  1173. *
  1174. * Note: it is the responsibility of driver to reserve multiple descriptors
  1175. * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
  1176. * carries up to configured number (fifo.max_frags) of contiguous buffers.
  1177. *
  1178. * Returns: VXGE_HW_OK - success;
  1179. * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
  1180. *
  1181. */
  1182. enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
  1183. struct __vxge_hw_fifo *fifo,
  1184. void **txdlh, void **txdl_priv)
  1185. {
  1186. struct __vxge_hw_channel *channel;
  1187. enum vxge_hw_status status;
  1188. int i;
  1189. channel = &fifo->channel;
  1190. status = vxge_hw_channel_dtr_alloc(channel, txdlh);
  1191. if (status == VXGE_HW_OK) {
  1192. struct vxge_hw_fifo_txd *txdp =
  1193. (struct vxge_hw_fifo_txd *)*txdlh;
  1194. struct __vxge_hw_fifo_txdl_priv *priv;
  1195. priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
  1196. /* reset the TxDL's private */
  1197. priv->align_dma_offset = 0;
  1198. priv->align_vaddr_start = priv->align_vaddr;
  1199. priv->align_used_frags = 0;
  1200. priv->frags = 0;
  1201. priv->alloc_frags = fifo->config->max_frags;
  1202. priv->next_txdl_priv = NULL;
  1203. *txdl_priv = (void *)(size_t)txdp->host_control;
  1204. for (i = 0; i < fifo->config->max_frags; i++) {
  1205. txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
  1206. txdp->control_0 = txdp->control_1 = 0;
  1207. }
  1208. }
  1209. return status;
  1210. }
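/*
 * Transmit path sketch (illustrative; DMA mapping of the skb and its
 * fragments is assumed to have been done by the caller, and "dma_addr"/
 * "len" stand for one mapped fragment):
 *
 *	void *txdlh, *txdl_priv;
 *	u32 frag_idx = 0;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
 *		return;			// out of descriptors, stop the queue
 *
 *	// one call per mapped fragment, up to fifo->config->max_frags
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, frag_idx++, dma_addr, len);
 *	// ...
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);	// sets OWN and rings the doorbell
 */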

/**
 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Titan specification.
 *
 */
void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
				  void *txdlh, u32 frag_idx,
				  dma_addr_t dma_pointer, u32 size)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp, *txdp_last;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;

	if (frag_idx != 0)
		txdp->control_0 = txdp->control_1 = 0;
	else {
		txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
			VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
		txdp->control_1 |= fifo->interrupt_type;
		txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
			fifo->tx_intr_num);
		if (txdl_priv->frags) {
			txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
				(txdl_priv->frags - 1);
			txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
				VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
		}
	}

	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;
	txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
	fifo->stats->total_buffers++;
	txdl_priv->frags++;
}

/**
 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
 *
 * Post descriptor on the 'fifo' type channel for transmission. Prior to
 * posting, the descriptor should be filled in accordance with the
 * Host/Titan interface specification for a given service (LL, etc.).
 *
 */
void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	struct vxge_hw_fifo_txd *txdp_last;
	struct vxge_hw_fifo_txd *txdp_first;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
	txdp_first = txdlh;

	txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
		VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
	txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;

	vxge_hw_channel_dtr_post(&fifo->channel, txdlh);

	__vxge_hw_non_offload_db_post(fifo,
		(u64)txdl_priv->dma_addr,
		txdl_priv->frags - 1,
		fifo->no_snoop_bits);

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
		fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
			fifo->stats->common_stats.usage_cnt;
}
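
/*
 * Usage sketch (illustrative only, not part of the driver): one way a
 * caller's transmit path could combine vxge_hw_fifo_txdl_reserve(),
 * vxge_hw_fifo_txdl_buffer_set() and vxge_hw_fifo_txdl_post() for a
 * single-fragment send. The fifo handle, the DMA address and the buffer
 * length are assumed to come from the caller's own context.
 *
 *	void *txdlh, *txdl_priv;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) != VXGE_HW_OK)
 *		return NETDEV_TX_BUSY;
 *	vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);
 *	vxge_hw_fifo_txdl_post(fifo, txdlh);
 */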

/**
 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle. Returned by HW.
 * @t_code: Transfer code, as per Titan User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HW.
 *
 * Retrieve the _next_ completed descriptor.
 * HW uses channel callback (*vxge_hw_channel_callback_f) to notify
 * driver of new completed descriptors. After that
 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
 * of the completions (the very first completion is passed by HW via
 * vxge_hw_channel_callback_f).
 *
 * Implementation-wise, the driver is free to call
 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HW)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Titan completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to Titan User Guide.
 *
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
	struct __vxge_hw_fifo *fifo, void **txdlh,
	enum vxge_hw_fifo_tcode *t_code)
{
	struct __vxge_hw_channel *channel;
	struct vxge_hw_fifo_txd *txdp;
	enum vxge_hw_status status = VXGE_HW_OK;

	channel = &fifo->channel;

	vxge_hw_channel_dtr_try_complete(channel, txdlh);

	txdp = *txdlh;
	if (txdp == NULL) {
		status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
		goto exit;
	}

	/* check whether host owns it */
	if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {

		vxge_assert(txdp->host_control != 0);

		vxge_hw_channel_dtr_complete(channel);

		*t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);

		if (fifo->stats->common_stats.usage_cnt > 0)
			fifo->stats->common_stats.usage_cnt--;

		status = VXGE_HW_OK;
		goto exit;
	}

	/* no more completions */
	*txdlh = NULL;
	status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
exit:
	return status;
}
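
/*
 * Usage sketch (illustrative only): draining Tx completions from a deferred
 * context. Error handling and statistics are reduced to the minimum; the
 * fifo handle and the per-descriptor cleanup are assumed to belong to the
 * caller.
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
 *			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */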

/**
 * vxge_hw_fifo_handle_tcode - Handle transfer code.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Titan user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the enum vxge_hw_status{} enumerated types.
 * VXGE_HW_OK - for success.
 * VXGE_HW_ERR_INVALID_TCODE - when the transfer code is not recognized.
 */
enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
					      void *txdlh,
					      enum vxge_hw_fifo_tcode t_code)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
		status = VXGE_HW_ERR_INVALID_TCODE;
		goto exit;
	}

	fifo->stats->txd_t_code_err_cnt[t_code]++;
exit:
	return status;
}

/**
 * vxge_hw_fifo_txdl_free - Free descriptor.
 * @fifo: Handle to the fifo object used for non offload send
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hw_fifo_txdl_reserve);
 *
 * - posted (vxge_hw_fifo_txdl_post);
 *
 * - completed (vxge_hw_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hw_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
{
	struct __vxge_hw_fifo_txdl_priv *txdl_priv;
	u32 max_frags;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
			(struct vxge_hw_fifo_txd *)txdlh);

	max_frags = fifo->config->max_frags;

	vxge_hw_channel_dtr_free(channel, txdlh);
}

/**
 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
 *               to MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added for this vpath into the list
 * @macaddr_mask: MAC address mask for macaddr
 * @duplicate_mode: Duplicate MAC address add mode. Please see
 *             enum vxge_hw_vpath_mac_addr_add_mode{}
 *
 * Adds the given mac address and mac address mask into the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN],
	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	switch (duplicate_mode) {
	case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
		i = 0;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
		i = 1;
		break;
	case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
		i = 2;
		break;
	default:
		i = 0;
		break;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
exit:
	return status;
}
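
/*
 * Usage sketch (illustrative only): adding one unicast filter entry for a
 * vpath. The vpath handle, the address and the mask value are assumptions
 * made for the example (the mask semantics follow the Titan DA-mask
 * format), and the duplicate mode shown is just one of the
 * enum vxge_hw_vpath_mac_addr_add_mode choices.
 *
 *	u8 mac[ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};
 *	u8 mask[ETH_ALEN] = {0};
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_add(vp, mac, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 *	if (status != VXGE_HW_OK)
 *		pr_err("failed to add MAC filter entry\n");
 */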

/**
 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: First MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the first mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
 *               vpath from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: Next MAC address entry for this vpath in the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Returns the next mac address and mac address mask in the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}

exit:
	return status;
}
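
/*
 * Usage sketch (illustrative only): walking the MAC address table of a
 * vpath by pairing vxge_hw_vpath_mac_addr_get() with repeated calls to
 * vxge_hw_vpath_mac_addr_get_next(). process_entry() is a hypothetical
 * stand-in for whatever the caller does with each entry.
 *
 *	u8 mac[ETH_ALEN], mask[ETH_ALEN];
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, mac, mask);
 *	while (status == VXGE_HW_OK) {
 *		process_entry(mac, mask);
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, mac, mask);
 *	}
 */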

/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Deletes the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}
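
/*
 * Usage sketch (illustrative only): mirroring the stack's VLAN filter into
 * the vpath vlan id table, e.g. from an ndo_vlan_rx_add_vid /
 * ndo_vlan_rx_kill_vid style callback. The vpath handle, the "adding"
 * flag and the choice of which vpaths to program are assumed to be the
 * caller's.
 *
 *	if (adding)
 *		status = vxge_hw_vpath_vid_add(vp, vid);
 *	else
 *		status = vxge_hw_vpath_vid_delete(vp, vid);
 *	if (status != VXGE_HW_OK)
 *		pr_warn("vlan id update failed\n");
 */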

/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	/* Enable promiscuous mode for function 0 only */
	if (!(vpath->hldev->access_rights &
		VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
		return VXGE_HW_OK;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

		val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
			 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

		val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
			   VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
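
/*
 * Usage sketch (illustrative only): reacting to IFF_PROMISC changes from a
 * set_rx_mode style callback. Which vpath handles exist and how they are
 * iterated is driver-specific and assumed here.
 *
 *	if (netdev->flags & IFF_PROMISC)
 *		vxge_hw_vpath_promisc_enable(vp);
 *	else
 *		vxge_hw_vpath_promisc_disable(vp);
 */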

/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 *
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
			struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
		val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}

/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath;
	enum vxge_hw_status status = VXGE_HW_OK;

	if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	vpath = vp->vpath;

	val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

	if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
		val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
		writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
	}
exit:
	return status;
}
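
/*
 * Usage sketch (illustrative only): honouring IFF_ALLMULTI from the same
 * rx-mode path as the promiscuous example above. Again, vpath iteration is
 * left to the caller.
 *
 *	if (netdev->flags & IFF_ALLMULTI)
 *		vxge_hw_vpath_mcast_enable(vp);
 *	else
 *		vxge_hw_vpath_mcast_disable(vp);
 */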

/*
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Virtual Path handle.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
			struct __vxge_hw_vpath_handle *vp,
			u32 skip_alarms)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
	return status;
}

/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 *                          alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *             interrupts (can be repeated). If fifo or ring are not enabled
 *             the MSIX vector for that should be set to 0
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API associates the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
		       int alarm_msix_id)
{
	u64 val64;
	struct __vxge_hw_virtualpath *vpath = vp->vpath;
	struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
	u32 vp_id = vp->vpath->vp_id;

	val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
			(vp_id * 4) + tim_msix_id[0]) |
		VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
			(vp_id * 4) + tim_msix_id[1]);

	writeq(val64, &vp_reg->interrupt_cfg0);

	writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
			(vpath->hldev->first_vp_id * 4) + alarm_msix_id),
			&vp_reg->interrupt_cfg2);

	if (vpath->hldev->config.intr_mode ==
					VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
				0, 32), &vp_reg->one_shot_vect0_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
				0, 32), &vp_reg->one_shot_vect1_en);
		__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
				VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
				0, 32), &vp_reg->one_shot_vect2_en);
	}
}
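
/*
 * Usage sketch (illustrative only): programming the per-vpath vector map
 * during interrupt setup. The vector numbering below is an assumption made
 * for the example; real drivers derive it from how they allocated their
 * MSI-X entries.
 *
 *	int tim_msix_id[4] = {0, 1, 0, 0};
 *	int alarm_msix_id = 2;
 *
 *	vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);
 *	vxge_hw_vpath_msix_unmask(vp, alarm_msix_id);
 */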

/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 *
 * See also: vxge_hw_vpath_msix_unmask(), vxge_hw_vpath_msix_clear()
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id
 *
 * See also: vxge_hw_vpath_msix_mask(), vxge_hw_vpath_msix_unmask()
 */
void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
			&hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
	else
		__vxge_hw_pio_mem_write32_upper(
			(u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 *
 * See also: vxge_hw_vpath_msix_mask(), vxge_hw_vpath_msix_clear()
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	__vxge_hw_pio_mem_write32_upper(
			(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
			&hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}
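
/*
 * Usage sketch (illustrative only): a minimal MSI-X handler body built on
 * the mask/clear/unmask helpers. "msix_id" is whatever vector index the
 * caller registered for this vpath, and process_completions() is a
 * hypothetical stand-in for the driver's own work between mask and unmask.
 *
 *	vxge_hw_vpath_msix_mask(vp, msix_id);
 *	process_completions(vp);
 *	vxge_hw_vpath_msix_clear(vp, msix_id);
 *	vxge_hw_vpath_msix_unmask(vp, msix_id);
 */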

/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask0);
	}

	val64 = readl(&hldev->common_reg->tim_int_mask1);

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
			&hldev->common_reg->tim_int_mask1);
	}
}

/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
	u64 tim_int_mask0[4] = {[0 ...3] = 0};
	u32 tim_int_mask1[4] = {[0 ...3] = 0};
	u64 val64;
	struct __vxge_hw_device *hldev = vp->vpath->hldev;

	VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
		tim_int_mask1, vp->vpath->vp_id);

	val64 = readq(&hldev->common_reg->tim_int_mask0);

	if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
		writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			  tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask0);
	}

	if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
	    (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
		__vxge_hw_pio_mem_write32_upper(
			(~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			   tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
			&hldev->common_reg->tim_int_mask1);
	}
}
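
/*
 * Usage sketch (illustrative only): in INTA mode, a driver might fence its
 * deferred Tx/Rx processing with the mask/unmask pair. poll_tx_rx() is a
 * hypothetical stand-in for the caller's own poll routine.
 *
 *	vxge_hw_vpath_inta_mask_tx_rx(vp);
 *	poll_tx_rx(vp);
 *	vxge_hw_vpath_inta_unmask_tx_rx(vp);
 */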

/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
	u8 t_code;
	enum vxge_hw_status status = VXGE_HW_OK;
	void *first_rxdh;
	u64 val64 = 0;
	int new_count = 0;

	ring->cmpl_cnt = 0;

	status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
	if (status == VXGE_HW_OK)
		ring->callback(ring, first_rxdh,
			t_code, ring->channel.userdata);

	if (ring->cmpl_cnt != 0) {
		ring->doorbell_cnt += ring->cmpl_cnt;
		if (ring->doorbell_cnt >= ring->rxds_limit) {
			/*
			 * Each RxD is of 4 qwords, update the number of
			 * qwords replenished
			 */
			new_count = (ring->doorbell_cnt * 4);

			/* For each block add 4 more qwords */
			ring->total_db_cnt += ring->doorbell_cnt;
			if (ring->total_db_cnt >= ring->rxds_per_block) {
				new_count += 4;
				/* Reset total count */
				ring->total_db_cnt %= ring->rxds_per_block;
			}
			writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
				&ring->vp_reg->prc_rxd_doorbell);
			val64 =
			  readl(&ring->common_reg->titan_general_int_status);
			ring->doorbell_cnt = 0;
		}
	}

	return status;
}
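
/*
 * Usage sketch (illustrative only): driving vxge_hw_vpath_poll_rx() from a
 * NAPI-style poll routine. How completed work is counted against the budget
 * and how interrupts are re-enabled afterwards is left to the caller.
 *
 *	vxge_hw_vpath_poll_rx(ring);
 *	if (work_done < budget)
 *		napi_complete(napi);
 */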

/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: Pointer to a cursor into an skb array; the completion callback
 *           uses it to hand completed skbs back to the caller for freeing
 * @nr_skb: Number of skb slots available at the cursor
 * @more: Set by the completion callback when further completions remain
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
					struct sk_buff ***skb_ptr, int nr_skb,
					int *more)
{
	enum vxge_hw_fifo_tcode t_code;
	void *first_txdlh;
	enum vxge_hw_status status = VXGE_HW_OK;
	struct __vxge_hw_channel *channel;

	channel = &fifo->channel;

	status = vxge_hw_fifo_txdl_next_completed(fifo,
				&first_txdlh, &t_code);
	if (status == VXGE_HW_OK)
		if (fifo->callback(fifo, first_txdlh, t_code,
			channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
			status = VXGE_HW_COMPLETIONS_REMAIN;

	return status;
}
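
/*
 * Usage sketch (illustrative only): reclaiming Tx completions and freeing
 * the skbs returned through the cursor. The array size is an arbitrary
 * choice for the example.
 *
 *	struct sk_buff *completed[16];
 *	struct sk_buff **skb_ptr = completed;
 *	int more = 0;
 *
 *	vxge_hw_vpath_poll_tx(fifo, &skb_ptr, ARRAY_SIZE(completed), &more);
 *	while (skb_ptr != completed)
 *		dev_kfree_skb(*--skb_ptr);
 */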