  1. /*
  2. * QLogic iSCSI Offload Driver
  3. * Copyright (c) 2016 Cavium Inc.
  4. *
  5. * This software is available under the terms of the GNU General Public License
  6. * (GPL) Version 2, available from the file COPYING in the main directory of
  7. * this source tree.
  8. */
  9. #include <linux/module.h>
  10. #include <linux/pci.h>
  11. #include <linux/kernel.h>
  12. #include <linux/if_arp.h>
  13. #include <scsi/iscsi_if.h>
  14. #include <linux/inet.h>
  15. #include <net/arp.h>
  16. #include <linux/list.h>
  17. #include <linux/kthread.h>
  18. #include <linux/mm.h>
  19. #include <linux/if_vlan.h>
  20. #include <linux/cpu.h>
  21. #include <linux/iscsi_boot_sysfs.h>
  22. #include <scsi/scsi_cmnd.h>
  23. #include <scsi/scsi_device.h>
  24. #include <scsi/scsi_eh.h>
  25. #include <scsi/scsi_host.h>
  26. #include <scsi/scsi.h>
  27. #include "qedi.h"
  28. #include "qedi_gbl.h"
  29. #include "qedi_iscsi.h"
  30. static uint qedi_fw_debug;
  31. module_param(qedi_fw_debug, uint, 0644);
  32. MODULE_PARM_DESC(qedi_fw_debug, " Firmware debug level 0(default) to 3");
  33. uint qedi_dbg_log = QEDI_LOG_WARN | QEDI_LOG_SCSI_TM;
  34. module_param(qedi_dbg_log, uint, 0644);
  35. MODULE_PARM_DESC(qedi_dbg_log, " Default debug level");
  36. uint qedi_io_tracing;
  37. module_param(qedi_io_tracing, uint, 0644);
  38. MODULE_PARM_DESC(qedi_io_tracing,
  39. " Enable logging of SCSI requests/completions into trace buffer. (default off).");
  40. const struct qed_iscsi_ops *qedi_ops;
  41. static struct scsi_transport_template *qedi_scsi_transport;
  42. static struct pci_driver qedi_pci_driver;
  43. static DEFINE_PER_CPU(struct qedi_percpu_s, qedi_percpu);
  44. static LIST_HEAD(qedi_udev_list);
  45. /* Static function declaration */
  46. static int qedi_alloc_global_queues(struct qedi_ctx *qedi);
  47. static void qedi_free_global_queues(struct qedi_ctx *qedi);
  48. static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
  49. static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
  50. static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
  51. static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
/*
 * qedi_iscsi_event_cb - firmware event-queue callback for iSCSI events.
 * @context: opaque driver context, a struct qedi_ctx * registered with qed.
 * @fw_event_code: ISCSI_EVENT_TYPE_* code reported by firmware.
 * @fw_handle: event payload, a struct iscsi_eqe_data *.
 *
 * Looks up the endpoint by the event's icid and dispatches on the event
 * code: connect/terminate completions wake the offload waiter, iSCSI and
 * TCP error events are forwarded to the respective error handlers.
 *
 * Returns 0 on success, -EINVAL for NULL arguments, or -ENODEV when the
 * endpoint for the icid has already been torn down.
 */
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
	struct qedi_ctx *qedi;
	struct qedi_endpoint *qedi_ep;
	struct iscsi_eqe_data *data;
	int rval = 0;

	if (!context || !fw_handle) {
		QEDI_ERR(NULL, "Recv event with ctx NULL\n");
		return -EINVAL;
	}

	qedi = (struct qedi_ctx *)context;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);

	data = (struct iscsi_eqe_data *)fw_handle;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
		  data->icid, data->conn_id, data->error_code,
		  data->error_pdu_opcode_reserved);

	/* The endpoint table is indexed by the firmware connection icid. */
	qedi_ep = qedi->ep_tbl[data->icid];
	if (!qedi_ep) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Cannot process event, ep already disconnected, cid=0x%x\n",
			  data->icid);
		WARN_ON(1);
		return -ENODEV;
	}

	switch (fw_event_code) {
	case ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE:
		/* Only advance state if the offload-connect is still pending. */
		if (qedi_ep->state == EP_STATE_OFLDCONN_START)
			qedi_ep->state = EP_STATE_OFLDCONN_COMPL;
		wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
		break;
	case ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE:
		qedi_ep->state = EP_STATE_DISCONN_COMPL;
		wake_up_interruptible(&qedi_ep->tcp_ofld_wait);
		break;
	case ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR:
		qedi_process_iscsi_error(qedi_ep, data);
		break;
	case ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_SYN_RCVD:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_TIME:
	case ISCSI_EVENT_TYPE_ASYN_MAX_RT_CNT:
	case ISCSI_EVENT_TYPE_ASYN_MAX_KA_PROBES_CNT:
	case ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2:
	case ISCSI_EVENT_TYPE_TCP_CONN_ERROR:
		/* All TCP-level failures funnel into one handler. */
		qedi_process_tcp_error(qedi_ep, data);
		break;
	default:
		QEDI_ERR(&qedi->dbg_ctx, "Recv Unknown Event %u\n",
			 fw_event_code);
	}
	return rval;
}
  106. static int qedi_uio_open(struct uio_info *uinfo, struct inode *inode)
  107. {
  108. struct qedi_uio_dev *udev = uinfo->priv;
  109. struct qedi_ctx *qedi = udev->qedi;
  110. if (!capable(CAP_NET_ADMIN))
  111. return -EPERM;
  112. if (udev->uio_dev != -1)
  113. return -EBUSY;
  114. rtnl_lock();
  115. udev->uio_dev = iminor(inode);
  116. qedi_reset_uio_rings(udev);
  117. set_bit(UIO_DEV_OPENED, &qedi->flags);
  118. rtnl_unlock();
  119. return 0;
  120. }
  121. static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode)
  122. {
  123. struct qedi_uio_dev *udev = uinfo->priv;
  124. struct qedi_ctx *qedi = udev->qedi;
  125. udev->uio_dev = -1;
  126. clear_bit(UIO_DEV_OPENED, &qedi->flags);
  127. qedi_ll2_free_skbs(qedi);
  128. return 0;
  129. }
  130. static void __qedi_free_uio_rings(struct qedi_uio_dev *udev)
  131. {
  132. if (udev->uctrl) {
  133. free_page((unsigned long)udev->uctrl);
  134. udev->uctrl = NULL;
  135. }
  136. if (udev->ll2_ring) {
  137. free_page((unsigned long)udev->ll2_ring);
  138. udev->ll2_ring = NULL;
  139. }
  140. if (udev->ll2_buf) {
  141. free_pages((unsigned long)udev->ll2_buf, 2);
  142. udev->ll2_buf = NULL;
  143. }
  144. }
  145. static void __qedi_free_uio(struct qedi_uio_dev *udev)
  146. {
  147. uio_unregister_device(&udev->qedi_uinfo);
  148. __qedi_free_uio_rings(udev);
  149. pci_dev_put(udev->pdev);
  150. kfree(udev);
  151. }
  152. static void qedi_free_uio(struct qedi_uio_dev *udev)
  153. {
  154. if (!udev)
  155. return;
  156. list_del_init(&udev->list);
  157. __qedi_free_uio(udev);
  158. }
  159. static void qedi_reset_uio_rings(struct qedi_uio_dev *udev)
  160. {
  161. struct qedi_ctx *qedi = NULL;
  162. struct qedi_uio_ctrl *uctrl = NULL;
  163. qedi = udev->qedi;
  164. uctrl = udev->uctrl;
  165. spin_lock_bh(&qedi->ll2_lock);
  166. uctrl->host_rx_cons = 0;
  167. uctrl->hw_rx_prod = 0;
  168. uctrl->hw_rx_bd_prod = 0;
  169. uctrl->host_rx_bd_cons = 0;
  170. memset(udev->ll2_ring, 0, udev->ll2_ring_size);
  171. memset(udev->ll2_buf, 0, udev->ll2_buf_size);
  172. spin_unlock_bh(&qedi->ll2_lock);
  173. }
  174. static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev)
  175. {
  176. int rc = 0;
  177. if (udev->ll2_ring || udev->ll2_buf)
  178. return rc;
  179. /* Memory for control area. */
  180. udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
  181. if (!udev->uctrl)
  182. return -ENOMEM;
  183. /* Allocating memory for LL2 ring */
  184. udev->ll2_ring_size = QEDI_PAGE_SIZE;
  185. udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP);
  186. if (!udev->ll2_ring) {
  187. rc = -ENOMEM;
  188. goto exit_alloc_ring;
  189. }
  190. /* Allocating memory for Tx/Rx pkt buffer */
  191. udev->ll2_buf_size = TX_RX_RING * LL2_SINGLE_BUF_SIZE;
  192. udev->ll2_buf_size = QEDI_PAGE_ALIGN(udev->ll2_buf_size);
  193. udev->ll2_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP |
  194. __GFP_ZERO, 2);
  195. if (!udev->ll2_buf) {
  196. rc = -ENOMEM;
  197. goto exit_alloc_buf;
  198. }
  199. return rc;
  200. exit_alloc_buf:
  201. free_page((unsigned long)udev->ll2_ring);
  202. udev->ll2_ring = NULL;
  203. exit_alloc_ring:
  204. return rc;
  205. }
  206. static int qedi_alloc_uio_rings(struct qedi_ctx *qedi)
  207. {
  208. struct qedi_uio_dev *udev = NULL;
  209. int rc = 0;
  210. list_for_each_entry(udev, &qedi_udev_list, list) {
  211. if (udev->pdev == qedi->pdev) {
  212. udev->qedi = qedi;
  213. if (__qedi_alloc_uio_rings(udev)) {
  214. udev->qedi = NULL;
  215. return -ENOMEM;
  216. }
  217. qedi->udev = udev;
  218. return 0;
  219. }
  220. }
  221. udev = kzalloc(sizeof(*udev), GFP_KERNEL);
  222. if (!udev) {
  223. rc = -ENOMEM;
  224. goto err_udev;
  225. }
  226. udev->uio_dev = -1;
  227. udev->qedi = qedi;
  228. udev->pdev = qedi->pdev;
  229. rc = __qedi_alloc_uio_rings(udev);
  230. if (rc)
  231. goto err_uctrl;
  232. list_add(&udev->list, &qedi_udev_list);
  233. pci_dev_get(udev->pdev);
  234. qedi->udev = udev;
  235. udev->tx_pkt = udev->ll2_buf;
  236. udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE;
  237. return 0;
  238. err_uctrl:
  239. kfree(udev);
  240. err_udev:
  241. return -ENOMEM;
  242. }
  243. static int qedi_init_uio(struct qedi_ctx *qedi)
  244. {
  245. struct qedi_uio_dev *udev = qedi->udev;
  246. struct uio_info *uinfo;
  247. int ret = 0;
  248. if (!udev)
  249. return -ENOMEM;
  250. uinfo = &udev->qedi_uinfo;
  251. uinfo->mem[0].addr = (unsigned long)udev->uctrl;
  252. uinfo->mem[0].size = sizeof(struct qedi_uio_ctrl);
  253. uinfo->mem[0].memtype = UIO_MEM_LOGICAL;
  254. uinfo->mem[1].addr = (unsigned long)udev->ll2_ring;
  255. uinfo->mem[1].size = udev->ll2_ring_size;
  256. uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
  257. uinfo->mem[2].addr = (unsigned long)udev->ll2_buf;
  258. uinfo->mem[2].size = udev->ll2_buf_size;
  259. uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
  260. uinfo->name = "qedi_uio";
  261. uinfo->version = QEDI_MODULE_VERSION;
  262. uinfo->irq = UIO_IRQ_CUSTOM;
  263. uinfo->open = qedi_uio_open;
  264. uinfo->release = qedi_uio_close;
  265. if (udev->uio_dev == -1) {
  266. if (!uinfo->priv) {
  267. uinfo->priv = udev;
  268. ret = uio_register_device(&udev->pdev->dev, uinfo);
  269. if (ret) {
  270. QEDI_ERR(&qedi->dbg_ctx,
  271. "UIO registration failed\n");
  272. }
  273. }
  274. }
  275. return ret;
  276. }
  277. static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
  278. struct qed_sb_info *sb_info, u16 sb_id)
  279. {
  280. struct status_block_e4 *sb_virt;
  281. dma_addr_t sb_phys;
  282. int ret;
  283. sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
  284. sizeof(struct status_block_e4), &sb_phys,
  285. GFP_KERNEL);
  286. if (!sb_virt) {
  287. QEDI_ERR(&qedi->dbg_ctx,
  288. "Status block allocation failed for id = %d.\n",
  289. sb_id);
  290. return -ENOMEM;
  291. }
  292. ret = qedi_ops->common->sb_init(qedi->cdev, sb_info, sb_virt, sb_phys,
  293. sb_id, QED_SB_TYPE_STORAGE);
  294. if (ret) {
  295. QEDI_ERR(&qedi->dbg_ctx,
  296. "Status block initialization failed for id = %d.\n",
  297. sb_id);
  298. return ret;
  299. }
  300. return 0;
  301. }
  302. static void qedi_free_sb(struct qedi_ctx *qedi)
  303. {
  304. struct qed_sb_info *sb_info;
  305. int id;
  306. for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
  307. sb_info = &qedi->sb_array[id];
  308. if (sb_info->sb_virt)
  309. dma_free_coherent(&qedi->pdev->dev,
  310. sizeof(*sb_info->sb_virt),
  311. (void *)sb_info->sb_virt,
  312. sb_info->sb_phys);
  313. }
  314. }
  315. static void qedi_free_fp(struct qedi_ctx *qedi)
  316. {
  317. kfree(qedi->fp_array);
  318. kfree(qedi->sb_array);
  319. }
  320. static void qedi_destroy_fp(struct qedi_ctx *qedi)
  321. {
  322. qedi_free_sb(qedi);
  323. qedi_free_fp(qedi);
  324. }
  325. static int qedi_alloc_fp(struct qedi_ctx *qedi)
  326. {
  327. int ret = 0;
  328. qedi->fp_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
  329. sizeof(struct qedi_fastpath), GFP_KERNEL);
  330. if (!qedi->fp_array) {
  331. QEDI_ERR(&qedi->dbg_ctx,
  332. "fastpath fp array allocation failed.\n");
  333. return -ENOMEM;
  334. }
  335. qedi->sb_array = kcalloc(MIN_NUM_CPUS_MSIX(qedi),
  336. sizeof(struct qed_sb_info), GFP_KERNEL);
  337. if (!qedi->sb_array) {
  338. QEDI_ERR(&qedi->dbg_ctx,
  339. "fastpath sb array allocation failed.\n");
  340. ret = -ENOMEM;
  341. goto free_fp;
  342. }
  343. return ret;
  344. free_fp:
  345. qedi_free_fp(qedi);
  346. return ret;
  347. }
  348. static void qedi_int_fp(struct qedi_ctx *qedi)
  349. {
  350. struct qedi_fastpath *fp;
  351. int id;
  352. memset(qedi->fp_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
  353. sizeof(*qedi->fp_array));
  354. memset(qedi->sb_array, 0, MIN_NUM_CPUS_MSIX(qedi) *
  355. sizeof(*qedi->sb_array));
  356. for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
  357. fp = &qedi->fp_array[id];
  358. fp->sb_info = &qedi->sb_array[id];
  359. fp->sb_id = id;
  360. fp->qedi = qedi;
  361. snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
  362. "qedi", id);
  363. /* fp_array[i] ---- irq cookie
  364. * So init data which is needed in int ctx
  365. */
  366. }
  367. }
  368. static int qedi_prepare_fp(struct qedi_ctx *qedi)
  369. {
  370. struct qedi_fastpath *fp;
  371. int id, ret = 0;
  372. ret = qedi_alloc_fp(qedi);
  373. if (ret)
  374. goto err;
  375. qedi_int_fp(qedi);
  376. for (id = 0; id < MIN_NUM_CPUS_MSIX(qedi); id++) {
  377. fp = &qedi->fp_array[id];
  378. ret = qedi_alloc_and_init_sb(qedi, fp->sb_info, fp->sb_id);
  379. if (ret) {
  380. QEDI_ERR(&qedi->dbg_ctx,
  381. "SB allocation and initialization failed.\n");
  382. ret = -EIO;
  383. goto err_init;
  384. }
  385. }
  386. return 0;
  387. err_init:
  388. qedi_free_sb(qedi);
  389. qedi_free_fp(qedi);
  390. err:
  391. return ret;
  392. }
  393. static int qedi_setup_cid_que(struct qedi_ctx *qedi)
  394. {
  395. int i;
  396. qedi->cid_que.cid_que_base = kmalloc_array(qedi->max_active_conns,
  397. sizeof(u32), GFP_KERNEL);
  398. if (!qedi->cid_que.cid_que_base)
  399. return -ENOMEM;
  400. qedi->cid_que.conn_cid_tbl = kmalloc_array(qedi->max_active_conns,
  401. sizeof(struct qedi_conn *),
  402. GFP_KERNEL);
  403. if (!qedi->cid_que.conn_cid_tbl) {
  404. kfree(qedi->cid_que.cid_que_base);
  405. qedi->cid_que.cid_que_base = NULL;
  406. return -ENOMEM;
  407. }
  408. qedi->cid_que.cid_que = (u32 *)qedi->cid_que.cid_que_base;
  409. qedi->cid_que.cid_q_prod_idx = 0;
  410. qedi->cid_que.cid_q_cons_idx = 0;
  411. qedi->cid_que.cid_q_max_idx = qedi->max_active_conns;
  412. qedi->cid_que.cid_free_cnt = qedi->max_active_conns;
  413. for (i = 0; i < qedi->max_active_conns; i++) {
  414. qedi->cid_que.cid_que[i] = i;
  415. qedi->cid_que.conn_cid_tbl[i] = NULL;
  416. }
  417. return 0;
  418. }
  419. static void qedi_release_cid_que(struct qedi_ctx *qedi)
  420. {
  421. kfree(qedi->cid_que.cid_que_base);
  422. qedi->cid_que.cid_que_base = NULL;
  423. kfree(qedi->cid_que.conn_cid_tbl);
  424. qedi->cid_que.conn_cid_tbl = NULL;
  425. }
  426. static int qedi_init_id_tbl(struct qedi_portid_tbl *id_tbl, u16 size,
  427. u16 start_id, u16 next)
  428. {
  429. id_tbl->start = start_id;
  430. id_tbl->max = size;
  431. id_tbl->next = next;
  432. spin_lock_init(&id_tbl->lock);
  433. id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
  434. if (!id_tbl->table)
  435. return -ENOMEM;
  436. return 0;
  437. }
  438. static void qedi_free_id_tbl(struct qedi_portid_tbl *id_tbl)
  439. {
  440. kfree(id_tbl->table);
  441. id_tbl->table = NULL;
  442. }
  443. int qedi_alloc_id(struct qedi_portid_tbl *id_tbl, u16 id)
  444. {
  445. int ret = -1;
  446. id -= id_tbl->start;
  447. if (id >= id_tbl->max)
  448. return ret;
  449. spin_lock(&id_tbl->lock);
  450. if (!test_bit(id, id_tbl->table)) {
  451. set_bit(id, id_tbl->table);
  452. ret = 0;
  453. }
  454. spin_unlock(&id_tbl->lock);
  455. return ret;
  456. }
/*
 * qedi_alloc_new_id - allocate the next free local port ID.
 *
 * Scans the bitmap from id_tbl->next, wrapping to the beginning if the
 * tail is exhausted.  Returns the allocated ID offset by id_tbl->start,
 * or QEDI_LOCAL_PORT_INVALID when the table is full.
 */
u16 qedi_alloc_new_id(struct qedi_portid_tbl *id_tbl)
{
	u16 id;

	spin_lock(&id_tbl->lock);
	/* First search the upper range [next, max). */
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = QEDI_LOCAL_PORT_INVALID;
		if (id_tbl->next != 0) {
			/* Wrap around: search the lower range [0, next). */
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = QEDI_LOCAL_PORT_INVALID;
		}
	}
	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		/*
		 * NOTE(review): the mask only computes (id + 1) % max when
		 * max is a power of two -- confirm QEDI_LOCAL_PORT_RANGE
		 * satisfies this.
		 */
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}
	spin_unlock(&id_tbl->lock);
	return id;
}
  478. void qedi_free_id(struct qedi_portid_tbl *id_tbl, u16 id)
  479. {
  480. if (id == QEDI_LOCAL_PORT_INVALID)
  481. return;
  482. id -= id_tbl->start;
  483. if (id >= id_tbl->max)
  484. return;
  485. clear_bit(id, id_tbl->table);
  486. }
  487. static void qedi_cm_free_mem(struct qedi_ctx *qedi)
  488. {
  489. kfree(qedi->ep_tbl);
  490. qedi->ep_tbl = NULL;
  491. qedi_free_id_tbl(&qedi->lcl_port_tbl);
  492. }
  493. static int qedi_cm_alloc_mem(struct qedi_ctx *qedi)
  494. {
  495. u16 port_id;
  496. qedi->ep_tbl = kzalloc((qedi->max_active_conns *
  497. sizeof(struct qedi_endpoint *)), GFP_KERNEL);
  498. if (!qedi->ep_tbl)
  499. return -ENOMEM;
  500. port_id = prandom_u32() % QEDI_LOCAL_PORT_RANGE;
  501. if (qedi_init_id_tbl(&qedi->lcl_port_tbl, QEDI_LOCAL_PORT_RANGE,
  502. QEDI_LOCAL_PORT_MIN, port_id)) {
  503. qedi_cm_free_mem(qedi);
  504. return -ENOMEM;
  505. }
  506. return 0;
  507. }
/*
 * qedi_host_alloc - allocate a Scsi_Host with a qedi_ctx as its private
 * data and populate driver-wide defaults.
 * @pdev: PCI function being probed.
 *
 * Returns the initialized qedi_ctx (also stored as the PCI drvdata), or
 * NULL if host allocation failed.
 */
static struct qedi_ctx *qedi_host_alloc(struct pci_dev *pdev)
{
	struct Scsi_Host *shost;
	struct qedi_ctx *qedi = NULL;

	shost = iscsi_host_alloc(&qedi_host_template,
				 sizeof(struct qedi_ctx), 0);
	if (!shost) {
		QEDI_ERR(NULL, "Could not allocate shost\n");
		goto exit_setup_shost;
	}

	/* SCSI host limits for this HBA. */
	shost->max_id = QEDI_MAX_ISCSI_CONNS_PER_HBA;
	shost->max_channel = 0;
	shost->max_lun = ~0;
	shost->max_cmd_len = 16;
	shost->transportt = qedi_scsi_transport;

	qedi = iscsi_host_priv(shost);
	memset(qedi, 0, sizeof(*qedi));
	qedi->shost = shost;
	qedi->dbg_ctx.host_no = shost->host_no;
	qedi->pdev = pdev;
	qedi->dbg_ctx.pdev = pdev;
	qedi->max_active_conns = ISCSI_MAX_SESS_PER_HBA;
	qedi->max_sqes = QEDI_SQ_SIZE;

	/* One blk-mq hardware queue per MSI-X vector when blk-mq is in use. */
	if (shost_use_blk_mq(shost))
		shost->nr_hw_queues = MIN_NUM_CPUS_MSIX(qedi);

	pci_set_drvdata(pdev, qedi);

exit_setup_shost:
	return qedi;
}
/*
 * qedi_ll2_rx - LL2 receive callback from the qed core.
 * @cookie: driver context (struct qedi_ctx *).
 * @skb: received frame.
 * @arg1: unused here.
 * @arg2: unused here.
 *
 * Queues the frame on ll2_skb_list for qedi_ll2_recv_thread to copy into
 * the iscsiuio shared-memory ring.  Frames are dropped (freed) when the
 * UIO device is not open or a work item cannot be allocated.
 *
 * Returns 0 (frame consumed or dropped), or -1 when @cookie is NULL.
 */
static int qedi_ll2_rx(void *cookie, struct sk_buff *skb, u32 arg1, u32 arg2)
{
	struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
	struct qedi_uio_dev *udev;
	struct qedi_uio_ctrl *uctrl;
	struct skb_work_list *work;
	u32 prod;

	if (!qedi) {
		QEDI_ERR(NULL, "qedi is NULL\n");
		return -1;
	}

	/* No consumer: iscsiuio has not opened the UIO device. */
	if (!test_bit(UIO_DEV_OPENED, &qedi->flags)) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_UIO,
			  "UIO DEV is not opened\n");
		kfree_skb(skb);
		return 0;
	}

	udev = qedi->udev;
	uctrl = udev->uctrl;

	/* GFP_ATOMIC: this callback may run in atomic/softirq context. */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Could not allocate work so dropping frame.\n");
		kfree_skb(skb);
		return 0;
	}

	INIT_LIST_HEAD(&work->list);
	work->skb = skb;

	/* Re-insert the stripped VLAN tag so user space sees the full frame. */
	if (skb_vlan_tag_present(skb))
		work->vlan_id = skb_vlan_tag_get(skb);

	if (work->vlan_id)
		__vlan_insert_tag(work->skb, htons(ETH_P_8021Q), work->vlan_id);

	spin_lock_bh(&qedi->ll2_lock);
	list_add_tail(&work->list, &qedi->ll2_skb_list);

	++uctrl->hw_rx_prod_cnt;
	/* Advance the producer only if the ring would not overrun the
	 * consumer; the recv thread is woken to process the new entry.
	 */
	prod = (uctrl->hw_rx_prod + 1) % RX_RING;
	if (prod != uctrl->host_rx_cons) {
		uctrl->hw_rx_prod = prod;
		spin_unlock_bh(&qedi->ll2_lock);
		wake_up_process(qedi->ll2_recv_thread);
		return 0;
	}

	spin_unlock_bh(&qedi->ll2_lock);
	return 0;
}
/* map this skb to iscsiuio mmaped region */
static int qedi_ll2_process_skb(struct qedi_ctx *qedi, struct sk_buff *skb,
				u16 vlan_id)
{
	struct qedi_uio_dev *udev = NULL;
	struct qedi_uio_ctrl *uctrl = NULL;
	struct qedi_rx_bd rxbd;
	struct qedi_rx_bd *p_rxbd;
	u32 rx_bd_prod;
	void *pkt;
	int len = 0;

	if (!qedi) {
		QEDI_ERR(NULL, "qedi is NULL\n");
		return -1;
	}

	udev = qedi->udev;
	uctrl = udev->uctrl;

	/* Copy the frame into the RX slot selected by the shared producer
	 * index; frames longer than a slot are truncated to
	 * LL2_SINGLE_BUF_SIZE.
	 */
	pkt = udev->rx_pkt + (uctrl->hw_rx_prod * LL2_SINGLE_BUF_SIZE);
	len = min_t(u32, skb->len, (u32)LL2_SINGLE_BUF_SIZE);
	memcpy(pkt, skb->data, len);

	/* Describe the packet in a buffer descriptor and publish it at the
	 * next BD-ring slot for iscsiuio to consume.
	 */
	memset(&rxbd, 0, sizeof(rxbd));
	rxbd.rx_pkt_index = uctrl->hw_rx_prod;
	rxbd.rx_pkt_len = len;
	rxbd.vlan_id = vlan_id;

	uctrl->hw_rx_bd_prod = (uctrl->hw_rx_bd_prod + 1) % QEDI_NUM_RX_BD;
	rx_bd_prod = uctrl->hw_rx_bd_prod;
	p_rxbd = (struct qedi_rx_bd *)udev->ll2_ring;
	p_rxbd += rx_bd_prod;
	memcpy(p_rxbd, &rxbd, sizeof(rxbd));

	/* notify the iscsiuio about new packet */
	uio_event_notify(&udev->qedi_uinfo);

	return 0;
}
  615. static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
  616. {
  617. struct skb_work_list *work, *work_tmp;
  618. spin_lock_bh(&qedi->ll2_lock);
  619. list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
  620. list_del(&work->list);
  621. if (work->skb)
  622. kfree_skb(work->skb);
  623. kfree(work);
  624. }
  625. spin_unlock_bh(&qedi->ll2_lock);
  626. }
/*
 * qedi_ll2_recv_thread - kernel thread that drains ll2_skb_list, copying
 * each queued frame into the iscsiuio shared ring via
 * qedi_ll2_process_skb(), then sleeping until woken by qedi_ll2_rx().
 */
static int qedi_ll2_recv_thread(void *arg)
{
	struct qedi_ctx *qedi = (struct qedi_ctx *)arg;
	struct skb_work_list *work, *work_tmp;

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		spin_lock_bh(&qedi->ll2_lock);
		list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list,
					 list) {
			list_del(&work->list);
			qedi_ll2_process_skb(qedi, work->skb, work->vlan_id);
			kfree_skb(work->skb);
			kfree(work);
		}
		/* Go TASK_INTERRUPTIBLE while still holding the lock so a
		 * wakeup arriving between unlock and schedule() isn't lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_bh(&qedi->ll2_lock);
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
/* Size and populate qedi->pf_params.iscsi_pf_params and allocate the
 * DMA-coherent array of per-queue parameters (p_cpuq) consumed by the
 * qed core.  Returns 0 on success, -1 on allocation failure.
 */
static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
{
	u8 num_sq_pages;
	u32 log_page_size;
	int rval = 0;

	/* SQ ring sizing: 8 bytes per outstanding task, expressed in pages. */
	num_sq_pages = (MAX_OUSTANDING_TASKS_PER_CON * 8) / PAGE_SIZE;

	qedi->num_queues = MIN_NUM_CPUS_MSIX(qedi);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "Number of CQ count is %d\n", qedi->num_queues);

	memset(&qedi->pf_params.iscsi_pf_params, 0,
	       sizeof(qedi->pf_params.iscsi_pf_params));

	/* One qedi_glbl_q_params entry per queue, shared with the device. */
	qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
			qedi->num_queues * sizeof(struct qedi_glbl_q_params),
			&qedi->hw_p_cpuq);
	if (!qedi->p_cpuq) {
		QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
		rval = -1;
		goto err_alloc_mem;
	}

	rval = qedi_alloc_global_queues(qedi);
	if (rval) {
		QEDI_ERR(&qedi->dbg_ctx, "Global queue allocation failed.\n");
		rval = -1;
		goto err_alloc_mem;
	}

	qedi->pf_params.iscsi_pf_params.num_cons = QEDI_MAX_ISCSI_CONNS_PER_HBA;
	qedi->pf_params.iscsi_pf_params.num_tasks = QEDI_MAX_ISCSI_TASK;
	qedi->pf_params.iscsi_pf_params.half_way_close_timeout = 10;
	qedi->pf_params.iscsi_pf_params.num_sq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_r2tq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
	qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
	qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
	qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
	qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;

	/* Compute log2(PAGE_SIZE) by scanning for the set bit. */
	for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
		if ((1 << log_page_size) == PAGE_SIZE)
			break;
	}
	qedi->pf_params.iscsi_pf_params.log_page_size = log_page_size;

	qedi->pf_params.iscsi_pf_params.glbl_q_params_addr =
							   (u64)qedi->hw_p_cpuq;

	/* RQ BDQ initializations.
	 * rq_num_entries: suggested value for Initiator is 16 (4KB RQ)
	 * rqe_log_size: 8 for 256B RQE
	 */
	qedi->pf_params.iscsi_pf_params.rqe_log_size = 8;
	/* BDQ address and size */
	qedi->pf_params.iscsi_pf_params.bdq_pbl_base_addr[BDQ_ID_RQ] =
							qedi->bdq_pbl_list_dma;
	qedi->pf_params.iscsi_pf_params.bdq_pbl_num_entries[BDQ_ID_RQ] =
						qedi->bdq_pbl_list_num_entries;
	qedi->pf_params.iscsi_pf_params.rq_buffer_size = QEDI_BDQ_BUF_SIZE;

	/* cq_num_entries: num_tasks + rq_num_entries */
	qedi->pf_params.iscsi_pf_params.cq_num_entries = 2048;

	qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
	qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;

err_alloc_mem:
	return rval;
}
  708. /* Free DMA coherent memory for array of queue pointers we pass to qed */
  709. static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
  710. {
  711. size_t size = 0;
  712. if (qedi->p_cpuq) {
  713. size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
  714. pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
  715. qedi->hw_p_cpuq);
  716. }
  717. qedi_free_global_queues(qedi);
  718. kfree(qedi->global_queues);
  719. }
  720. static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
  721. struct qedi_boot_target *tgt, u8 index)
  722. {
  723. u32 ipv6_en;
  724. ipv6_en = !!(block->generic.ctrl_flags &
  725. NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
  726. snprintf(tgt->iscsi_name, sizeof(tgt->iscsi_name), "%s\n",
  727. block->target[index].target_name.byte);
  728. tgt->ipv6_en = ipv6_en;
  729. if (ipv6_en)
  730. snprintf(tgt->ip_addr, IPV6_LEN, "%pI6\n",
  731. block->target[index].ipv6_addr.byte);
  732. else
  733. snprintf(tgt->ip_addr, IPV4_LEN, "%pI4\n",
  734. block->target[index].ipv4_addr.byte);
  735. }
/* Match one of the (up to two) NVRAM boot targets against the currently
 * online iSCSI sessions and, on a match, fill the digest and portal TLV
 * fields from that session.  Returns 0 when a boot session was found,
 * -1 otherwise (including allocation failure).
 */
static int qedi_find_boot_info(struct qedi_ctx *qedi,
			       struct qed_mfw_tlv_iscsi *iscsi,
			       struct nvm_iscsi_block *block)
{
	struct qedi_boot_target *pri_tgt = NULL, *sec_tgt = NULL;
	u32 pri_ctrl_flags = 0, sec_ctrl_flags = 0, found = 0;
	struct iscsi_cls_session *cls_sess;
	struct iscsi_cls_conn *cls_conn;
	struct qedi_conn *qedi_conn;
	struct iscsi_session *sess;
	struct iscsi_conn *conn;
	char ep_ip_addr[64];
	int i, ret = 0;

	/* Snapshot the primary (index 0) target, if enabled in NVRAM. */
	pri_ctrl_flags = !!(block->target[0].ctrl_flags &
					NVM_ISCSI_CFG_TARGET_ENABLED);
	if (pri_ctrl_flags) {
		pri_tgt = kzalloc(sizeof(*pri_tgt), GFP_KERNEL);
		if (!pri_tgt)
			return -1;
		qedi_get_boot_tgt_info(block, pri_tgt, 0);
	}

	/* Likewise for the secondary (index 1) target. */
	sec_ctrl_flags = !!(block->target[1].ctrl_flags &
					NVM_ISCSI_CFG_TARGET_ENABLED);
	if (sec_ctrl_flags) {
		sec_tgt = kzalloc(sizeof(*sec_tgt), GFP_KERNEL);
		if (!sec_tgt) {
			ret = -1;
			goto free_tgt;
		}
		qedi_get_boot_tgt_info(block, sec_tgt, 1);
	}

	for (i = 0; i < qedi->max_active_conns; i++) {
		qedi_conn = qedi_get_conn_from_id(qedi, i);
		if (!qedi_conn)
			continue;

		/* Format the endpoint IP exactly like
		 * qedi_get_boot_tgt_info() ("%pIx\n") so a plain strcmp
		 * below is enough.
		 */
		if (qedi_conn->ep->ip_type == TCP_IPV4)
			snprintf(ep_ip_addr, IPV4_LEN, "%pI4\n",
				 qedi_conn->ep->dst_addr);
		else
			snprintf(ep_ip_addr, IPV6_LEN, "%pI6\n",
				 qedi_conn->ep->dst_addr);

		cls_conn = qedi_conn->cls_conn;
		conn = cls_conn->dd_data;
		cls_sess = iscsi_conn_to_session(cls_conn);
		sess = cls_sess->dd_data;

		if (!iscsi_is_session_online(cls_sess))
			continue;

		if (!sess->targetname)
			continue;

		if (pri_ctrl_flags) {
			if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
			    !strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
				found = 1;
				break;
			}
		}

		if (sec_ctrl_flags) {
			if (!strcmp(sec_tgt->iscsi_name, sess->targetname) &&
			    !strcmp(sec_tgt->ip_addr, ep_ip_addr)) {
				found = 1;
				break;
			}
		}
	}

	if (found) {
		/* conn/sess refer to the session that matched above. */
		if (conn->hdrdgst_en) {
			iscsi->header_digest_set = true;
			iscsi->header_digest = 1;
		}

		if (conn->datadgst_en) {
			iscsi->data_digest_set = true;
			iscsi->data_digest = 1;
		}
		iscsi->boot_taget_portal_set = true;
		iscsi->boot_taget_portal = sess->tpgt;
	} else {
		ret = -1;
	}

	/* kfree(NULL) would be fine, but the flags mirror the allocations. */
	if (sec_ctrl_flags)
		kfree(sec_tgt);
free_tgt:
	if (pri_ctrl_flags)
		kfree(pri_tgt);

	return ret;
}
  821. static void qedi_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
  822. {
  823. struct qedi_ctx *qedi;
  824. if (!dev) {
  825. QEDI_INFO(NULL, QEDI_LOG_EVT,
  826. "dev is NULL so ignoring get_generic_tlv_data request.\n");
  827. return;
  828. }
  829. qedi = (struct qedi_ctx *)dev;
  830. memset(data, 0, sizeof(struct qed_generic_tlvs));
  831. ether_addr_copy(data->mac[0], qedi->mac);
  832. }
/*
 * Protocol TLV handler - fills the qed_mfw_tlv_iscsi request from
 * firmware offload statistics and, when an NVRAM config block exists
 * for this PF, from the boot-target configuration.
 */
static void qedi_get_protocol_tlv_data(void *dev, void *data)
{
	struct qed_mfw_tlv_iscsi *iscsi = data;
	struct qed_iscsi_stats *fw_iscsi_stats;
	struct nvm_iscsi_block *block = NULL;
	u32 chap_en = 0, mchap_en = 0;
	struct qedi_ctx *qedi = dev;
	int rval = 0;

	fw_iscsi_stats = kmalloc(sizeof(*fw_iscsi_stats), GFP_KERNEL);
	if (!fw_iscsi_stats) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Could not allocate memory for fw_iscsi_stats.\n");
		goto exit_get_data;
	}

	mutex_lock(&qedi->stats_lock);
	/* Query firmware for offload stats */
	qedi_ops->get_stats(qedi->cdev, fw_iscsi_stats);
	mutex_unlock(&qedi->stats_lock);

	iscsi->rx_frames_set = true;
	iscsi->rx_frames = fw_iscsi_stats->iscsi_rx_packet_cnt;
	iscsi->rx_bytes_set = true;
	iscsi->rx_bytes = fw_iscsi_stats->iscsi_rx_bytes_cnt;
	iscsi->tx_frames_set = true;
	iscsi->tx_frames = fw_iscsi_stats->iscsi_tx_packet_cnt;
	iscsi->tx_bytes_set = true;
	iscsi->tx_bytes = fw_iscsi_stats->iscsi_tx_bytes_cnt;
	iscsi->frame_size_set = true;
	iscsi->frame_size = qedi->ll2_mtu;

	block = qedi_get_nvram_block(qedi);
	if (block) {
		chap_en = !!(block->generic.ctrl_flags &
			     NVM_ISCSI_CFG_GEN_CHAP_ENABLED);
		mchap_en = !!(block->generic.ctrl_flags &
			      NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED);

		/* auth_method defaults to 1; CHAP bumps it to 2 and mutual
		 * CHAP to 3 (mutual wins when both bits are set).
		 */
		iscsi->auth_method_set = (chap_en || mchap_en) ? true : false;
		iscsi->auth_method = 1;
		if (chap_en)
			iscsi->auth_method = 2;
		if (mchap_en)
			iscsi->auth_method = 3;

		iscsi->tx_desc_size_set = true;
		iscsi->tx_desc_size = QEDI_SQ_SIZE;
		iscsi->rx_desc_size_set = true;
		iscsi->rx_desc_size = QEDI_CQ_SIZE;

		/* tpgt, hdr digest, data digest */
		rval = qedi_find_boot_info(qedi, iscsi, block);
		if (rval)
			QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
				  "Boot target not set");
	}

	kfree(fw_iscsi_stats);
exit_get_data:
	return;
}
  890. static void qedi_link_update(void *dev, struct qed_link_output *link)
  891. {
  892. struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
  893. if (link->link_up) {
  894. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "Link Up event.\n");
  895. atomic_set(&qedi->link_state, QEDI_LINK_UP);
  896. } else {
  897. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
  898. "Link Down event.\n");
  899. atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
  900. }
  901. }
/* Callbacks registered with the qed core; the nested initializer fills
 * the embedded common-ops structure.
 */
static struct qed_iscsi_cb_ops qedi_cb_ops = {
	{
		.link_update = qedi_link_update,
		.get_protocol_tlv_data = qedi_get_protocol_tlv_data,
		.get_generic_tlv_data = qedi_get_generic_tlv_data,
	}
};
/* Turn a completion entry into a work item on the per-CPU work list.
 * Solicited CQEs reuse the work struct embedded in the owning qedi_cmd;
 * unsolicited/dummy/cleanup CQEs get a GFP_ATOMIC allocation that the
 * consumer frees after processing (see is_solicited checks there).
 * Returns 0 on success, -1 when the connection or command is gone, the
 * allocation fails, or the CQE type is unknown.
 */
static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
			  u16 que_idx, struct qedi_percpu_s *p)
{
	struct qedi_work *qedi_work;
	struct qedi_conn *q_conn;
	struct iscsi_conn *conn;
	struct qedi_cmd *qedi_cmd;
	u32 iscsi_cid;
	int rc = 0;

	iscsi_cid = cqe->cqe_common.conn_id;
	q_conn = qedi->cid_que.conn_cid_tbl[iscsi_cid];
	if (!q_conn) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Session no longer exists for cid=0x%x!!\n",
			  iscsi_cid);
		return -1;
	}
	conn = q_conn->cls_conn->dd_data;

	switch (cqe->cqe_common.cqe_type) {
	case ISCSI_CQE_TYPE_SOLICITED:
	case ISCSI_CQE_TYPE_SOLICITED_WITH_SENSE:
		/* Look up the command this completion belongs to. */
		qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
		if (!qedi_cmd) {
			rc = -1;
			break;
		}
		INIT_LIST_HEAD(&qedi_cmd->cqe_work.list);
		qedi_cmd->cqe_work.qedi = qedi;
		memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
		qedi_cmd->cqe_work.que_idx = que_idx;
		qedi_cmd->cqe_work.is_solicited = true;
		list_add_tail(&qedi_cmd->cqe_work.list, &p->work_list);
		break;
	case ISCSI_CQE_TYPE_UNSOLICITED:
	case ISCSI_CQE_TYPE_DUMMY:
	case ISCSI_CQE_TYPE_TASK_CLEANUP:
		/* Atomic context (called under p->p_work_lock from the
		 * fastpath), so GFP_ATOMIC.
		 */
		qedi_work = kzalloc(sizeof(*qedi_work), GFP_ATOMIC);
		if (!qedi_work) {
			rc = -1;
			break;
		}
		INIT_LIST_HEAD(&qedi_work->list);
		qedi_work->qedi = qedi;
		memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
		qedi_work->que_idx = que_idx;
		qedi_work->is_solicited = false;
		list_add_tail(&qedi_work->list, &p->work_list);
		break;
	default:
		rc = -1;
		QEDI_ERR(&qedi->dbg_ctx, "FW Error cqe.\n");
	}
	return rc;
}
/* Drain all new CQEs (between our consumer index and the firmware
 * producer index found in the status block) into the current CPU's
 * work list, then wake that CPU's io thread.  Runs from the MSI-X
 * handler.  Always returns true.
 */
static bool qedi_process_completions(struct qedi_fastpath *fp)
{
	struct qedi_ctx *qedi = fp->qedi;
	struct qed_sb_info *sb_info = fp->sb_info;
	struct status_block_e4 *sb = sb_info->sb_virt;
	struct qedi_percpu_s *p = NULL;
	struct global_queue *que;
	u16 prod_idx;
	unsigned long flags;
	union iscsi_cqe *cqe;
	int cpu;
	int ret;

	/* Get the current firmware producer index */
	prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
	/* The firmware index counts past the ring size; wrap it. */
	if (prod_idx >= QEDI_CQ_SIZE)
		prod_idx = prod_idx % QEDI_CQ_SIZE;

	que = qedi->global_queues[fp->sb_id];
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
		  "Before: global queue=%p prod_idx=%d cons_idx=%d, sb_id=%d\n",
		  que, prod_idx, que->cq_cons_idx, fp->sb_id);

	qedi->intr_cpu = fp->sb_id;
	cpu = smp_processor_id();
	p = &per_cpu(qedi_percpu, cpu);

	/* The io thread is created by the CPU hotplug callback; it should
	 * exist whenever a fastpath interrupt fires on this CPU.
	 */
	if (unlikely(!p->iothread))
		WARN_ON(1);

	spin_lock_irqsave(&p->p_work_lock, flags);
	while (que->cq_cons_idx != prod_idx) {
		cqe = &que->cq[que->cq_cons_idx];
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_IO,
			  "cqe=%p prod_idx=%d cons_idx=%d.\n",
			  cqe, prod_idx, que->cq_cons_idx);

		ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
		if (ret)
			QEDI_WARN(&qedi->dbg_ctx,
				  "Dropping CQE 0x%x for cid=0x%x.\n",
				  que->cq_cons_idx, cqe->cqe_common.conn_id);

		/* Advance the consumer with manual wrap-around. */
		que->cq_cons_idx++;
		if (que->cq_cons_idx == QEDI_CQ_SIZE)
			que->cq_cons_idx = 0;
	}
	wake_up_process(p->iothread);
	spin_unlock_irqrestore(&p->p_work_lock, flags);

	return true;
}
  1007. static bool qedi_fp_has_work(struct qedi_fastpath *fp)
  1008. {
  1009. struct qedi_ctx *qedi = fp->qedi;
  1010. struct global_queue *que;
  1011. struct qed_sb_info *sb_info = fp->sb_info;
  1012. struct status_block_e4 *sb = sb_info->sb_virt;
  1013. u16 prod_idx;
  1014. barrier();
  1015. /* Get the current firmware producer index */
  1016. prod_idx = sb->pi_array[QEDI_PROTO_CQ_PROD_IDX];
  1017. /* Get the pointer to the global CQ this completion is on */
  1018. que = qedi->global_queues[fp->sb_id];
  1019. /* prod idx wrap around uint16 */
  1020. if (prod_idx >= QEDI_CQ_SIZE)
  1021. prod_idx = prod_idx % QEDI_CQ_SIZE;
  1022. return (que->cq_cons_idx != prod_idx);
  1023. }
/* MSI-X fastpath handler code */
static irqreturn_t qedi_msix_handler(int irq, void *dev_id)
{
	struct qedi_fastpath *fp = dev_id;
	struct qedi_ctx *qedi = fp->qedi;
	bool wake_io_thread = true;

	/* Mask the status block while we drain it. */
	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);

process_again:
	wake_io_thread = qedi_process_completions(fp);
	if (wake_io_thread) {
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
			  "process already running\n");
	}

	if (qedi_fp_has_work(fp) == 0)
		qed_sb_update_sb_idx(fp->sb_info);

	/* Check for more work */
	rmb();
	if (qedi_fp_has_work(fp) == 0)
		/* Queue fully drained: re-enable the interrupt. */
		qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
	else
		/* More CQEs arrived while we were acking; loop again. */
		goto process_again;

	return IRQ_HANDLED;
}
  1047. /* simd handler for MSI/INTa */
  1048. static void qedi_simd_int_handler(void *cookie)
  1049. {
  1050. /* Cookie is qedi_ctx struct */
  1051. struct qedi_ctx *qedi = (struct qedi_ctx *)cookie;
  1052. QEDI_WARN(&qedi->dbg_ctx, "qedi=%p.\n", qedi);
  1053. }
  1054. #define QEDI_SIMD_HANDLER_NUM 0
/* Quiesce and release fastpath interrupts: for MSI-X, synchronize,
 * clear the affinity hint and free every vector that was requested;
 * otherwise unhook the simd handler.  Finally tell qed to release the
 * fastpath vectors.
 */
static void qedi_sync_free_irqs(struct qedi_ctx *qedi)
{
	int i;

	if (qedi->int_info.msix_cnt) {
		/* used_cnt tracks how many vectors request_irq() claimed. */
		for (i = 0; i < qedi->int_info.used_cnt; i++) {
			synchronize_irq(qedi->int_info.msix[i].vector);
			irq_set_affinity_hint(qedi->int_info.msix[i].vector,
					      NULL);
			free_irq(qedi->int_info.msix[i].vector,
				 &qedi->fp_array[i]);
		}
	} else {
		qedi_ops->common->simd_handler_clean(qedi->cdev,
						     QEDI_SIMD_HANDLER_NUM);
	}

	qedi->int_info.used_cnt = 0;
	qedi_ops->common->set_fp_int(qedi->cdev, 0);
}
/* Request one MSI-X vector per fastpath queue and spread the affinity
 * hints across online CPUs.  On failure, everything claimed so far is
 * released.  Returns 0 on success or the request_irq() error.
 */
static int qedi_request_msix_irq(struct qedi_ctx *qedi)
{
	int i, rc, cpu;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < MIN_NUM_CPUS_MSIX(qedi); i++) {
		rc = request_irq(qedi->int_info.msix[i].vector,
				 qedi_msix_handler, 0, "qedi",
				 &qedi->fp_array[i]);
		if (rc) {
			QEDI_WARN(&qedi->dbg_ctx, "request_irq failed.\n");
			qedi_sync_free_irqs(qedi);
			return rc;
		}
		qedi->int_info.used_cnt++;
		/* NOTE(review): the return value of irq_set_affinity_hint()
		 * is stored in rc but never checked - a failed hint is
		 * silently ignored.  Confirm whether that is intentional.
		 */
		rc = irq_set_affinity_hint(qedi->int_info.msix[i].vector,
					   get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}

	return 0;
}
  1093. static int qedi_setup_int(struct qedi_ctx *qedi)
  1094. {
  1095. int rc = 0;
  1096. rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
  1097. rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
  1098. if (rc)
  1099. goto exit_setup_int;
  1100. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
  1101. "Number of msix_cnt = 0x%x num of cpus = 0x%x\n",
  1102. qedi->int_info.msix_cnt, num_online_cpus());
  1103. if (qedi->int_info.msix_cnt) {
  1104. rc = qedi_request_msix_irq(qedi);
  1105. goto exit_setup_int;
  1106. } else {
  1107. qedi_ops->common->simd_handler_config(qedi->cdev, &qedi,
  1108. QEDI_SIMD_HANDLER_NUM,
  1109. qedi_simd_int_handler);
  1110. qedi->int_info.used_cnt = 1;
  1111. }
  1112. exit_setup_int:
  1113. return rc;
  1114. }
  1115. static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
  1116. {
  1117. if (qedi->iscsi_image)
  1118. dma_free_coherent(&qedi->pdev->dev,
  1119. sizeof(struct qedi_nvm_iscsi_image),
  1120. qedi->iscsi_image, qedi->nvm_buf_dma);
  1121. }
  1122. static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
  1123. {
  1124. struct qedi_nvm_iscsi_image nvm_image;
  1125. qedi->iscsi_image = dma_zalloc_coherent(&qedi->pdev->dev,
  1126. sizeof(nvm_image),
  1127. &qedi->nvm_buf_dma,
  1128. GFP_KERNEL);
  1129. if (!qedi->iscsi_image) {
  1130. QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
  1131. return -ENOMEM;
  1132. }
  1133. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
  1134. "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_image,
  1135. qedi->nvm_buf_dma);
  1136. return 0;
  1137. }
  1138. static void qedi_free_bdq(struct qedi_ctx *qedi)
  1139. {
  1140. int i;
  1141. if (qedi->bdq_pbl_list)
  1142. dma_free_coherent(&qedi->pdev->dev, PAGE_SIZE,
  1143. qedi->bdq_pbl_list, qedi->bdq_pbl_list_dma);
  1144. if (qedi->bdq_pbl)
  1145. dma_free_coherent(&qedi->pdev->dev, qedi->bdq_pbl_mem_size,
  1146. qedi->bdq_pbl, qedi->bdq_pbl_dma);
  1147. for (i = 0; i < QEDI_BDQ_NUM; i++) {
  1148. if (qedi->bdq[i].buf_addr) {
  1149. dma_free_coherent(&qedi->pdev->dev, QEDI_BDQ_BUF_SIZE,
  1150. qedi->bdq[i].buf_addr,
  1151. qedi->bdq[i].buf_dma);
  1152. }
  1153. }
  1154. }
  1155. static void qedi_free_global_queues(struct qedi_ctx *qedi)
  1156. {
  1157. int i;
  1158. struct global_queue **gl = qedi->global_queues;
  1159. for (i = 0; i < qedi->num_queues; i++) {
  1160. if (!gl[i])
  1161. continue;
  1162. if (gl[i]->cq)
  1163. dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_mem_size,
  1164. gl[i]->cq, gl[i]->cq_dma);
  1165. if (gl[i]->cq_pbl)
  1166. dma_free_coherent(&qedi->pdev->dev, gl[i]->cq_pbl_size,
  1167. gl[i]->cq_pbl, gl[i]->cq_pbl_dma);
  1168. kfree(gl[i]);
  1169. }
  1170. qedi_free_bdq(qedi);
  1171. qedi_free_nvm_iscsi_cfg(qedi);
  1172. }
/* Allocate the BDQ (buffer descriptor queue): the per-buffer DMA
 * areas, the PBL describing them, and the one-page PBL page list.
 * Returns 0 on success, -ENOMEM on any allocation failure (caller is
 * expected to clean up via qedi_free_bdq()/qedi_free_global_queues()).
 */
static int qedi_alloc_bdq(struct qedi_ctx *qedi)
{
	int i;
	struct scsi_bd *pbl;
	u64 *list;
	dma_addr_t page;

	/* Alloc dma memory for BDQ buffers */
	for (i = 0; i < QEDI_BDQ_NUM; i++) {
		qedi->bdq[i].buf_addr =
			dma_alloc_coherent(&qedi->pdev->dev,
					   QEDI_BDQ_BUF_SIZE,
					   &qedi->bdq[i].buf_dma,
					   GFP_KERNEL);
		if (!qedi->bdq[i].buf_addr) {
			QEDI_ERR(&qedi->dbg_ctx,
				 "Could not allocate BDQ buffer %d.\n", i);
			return -ENOMEM;
		}
	}

	/* Alloc dma memory for BDQ page buffer list */
	qedi->bdq_pbl_mem_size = QEDI_BDQ_NUM * sizeof(struct scsi_bd);
	qedi->bdq_pbl_mem_size = ALIGN(qedi->bdq_pbl_mem_size, PAGE_SIZE);
	qedi->rq_num_entries = qedi->bdq_pbl_mem_size / sizeof(struct scsi_bd);

	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, "rq_num_entries = %d.\n",
		  qedi->rq_num_entries);

	qedi->bdq_pbl = dma_alloc_coherent(&qedi->pdev->dev,
					   qedi->bdq_pbl_mem_size,
					   &qedi->bdq_pbl_dma, GFP_KERNEL);
	if (!qedi->bdq_pbl) {
		QEDI_ERR(&qedi->dbg_ctx, "Could not allocate BDQ PBL.\n");
		return -ENOMEM;
	}

	/*
	 * Populate BDQ PBL with physical and virtual address of individual
	 * BDQ buffers
	 */
	pbl = (struct scsi_bd *)qedi->bdq_pbl;
	for (i = 0; i < QEDI_BDQ_NUM; i++) {
		/* Each entry carries the buffer's DMA address split into
		 * 32-bit halves plus its index in the opaque field.
		 */
		pbl->address.hi =
				cpu_to_le32(QEDI_U64_HI(qedi->bdq[i].buf_dma));
		pbl->address.lo =
				cpu_to_le32(QEDI_U64_LO(qedi->bdq[i].buf_dma));
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
			  pbl, pbl->address.hi, pbl->address.lo, i);
		pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
		pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
		pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
		pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
		pbl++;
	}

	/* Allocate list of PBL pages */
	qedi->bdq_pbl_list = dma_zalloc_coherent(&qedi->pdev->dev, PAGE_SIZE,
						 &qedi->bdq_pbl_list_dma,
						 GFP_KERNEL);
	if (!qedi->bdq_pbl_list) {
		QEDI_ERR(&qedi->dbg_ctx,
			 "Could not allocate list of PBL pages.\n");
		return -ENOMEM;
	}

	/*
	 * Now populate PBL list with pages that contain pointers to the
	 * individual buffers.
	 */
	qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size / PAGE_SIZE;
	list = (u64 *)qedi->bdq_pbl_list;
	page = qedi->bdq_pbl_list_dma;
	for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
		/* NOTE(review): every entry is written with bdq_pbl_dma and
		 * the 'page' accumulator is advanced but never stored -
		 * presumably the PBL fits in a single page so only entry 0
		 * matters; confirm against the firmware interface.
		 */
		*list = qedi->bdq_pbl_dma;
		list++;
		page += PAGE_SIZE;
	}

	return 0;
}
  1247. static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
  1248. {
  1249. u32 *list;
  1250. int i;
  1251. int status = 0, rc;
  1252. u32 *pbl;
  1253. dma_addr_t page;
  1254. int num_pages;
  1255. /*
  1256. * Number of global queues (CQ / RQ). This should
  1257. * be <= number of available MSIX vectors for the PF
  1258. */
  1259. if (!qedi->num_queues) {
  1260. QEDI_ERR(&qedi->dbg_ctx, "No MSI-X vectors available!\n");
  1261. return 1;
  1262. }
  1263. /* Make sure we allocated the PBL that will contain the physical
  1264. * addresses of our queues
  1265. */
  1266. if (!qedi->p_cpuq) {
  1267. status = 1;
  1268. goto mem_alloc_failure;
  1269. }
  1270. qedi->global_queues = kzalloc((sizeof(struct global_queue *) *
  1271. qedi->num_queues), GFP_KERNEL);
  1272. if (!qedi->global_queues) {
  1273. QEDI_ERR(&qedi->dbg_ctx,
  1274. "Unable to allocate global queues array ptr memory\n");
  1275. return -ENOMEM;
  1276. }
  1277. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
  1278. "qedi->global_queues=%p.\n", qedi->global_queues);
  1279. /* Allocate DMA coherent buffers for BDQ */
  1280. rc = qedi_alloc_bdq(qedi);
  1281. if (rc)
  1282. goto mem_alloc_failure;
  1283. /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */
  1284. rc = qedi_alloc_nvm_iscsi_cfg(qedi);
  1285. if (rc)
  1286. goto mem_alloc_failure;
  1287. /* Allocate a CQ and an associated PBL for each MSI-X
  1288. * vector.
  1289. */
  1290. for (i = 0; i < qedi->num_queues; i++) {
  1291. qedi->global_queues[i] =
  1292. kzalloc(sizeof(*qedi->global_queues[0]),
  1293. GFP_KERNEL);
  1294. if (!qedi->global_queues[i]) {
  1295. QEDI_ERR(&qedi->dbg_ctx,
  1296. "Unable to allocation global queue %d.\n", i);
  1297. goto mem_alloc_failure;
  1298. }
  1299. qedi->global_queues[i]->cq_mem_size =
  1300. (QEDI_CQ_SIZE + 8) * sizeof(union iscsi_cqe);
  1301. qedi->global_queues[i]->cq_mem_size =
  1302. (qedi->global_queues[i]->cq_mem_size +
  1303. (QEDI_PAGE_SIZE - 1));
  1304. qedi->global_queues[i]->cq_pbl_size =
  1305. (qedi->global_queues[i]->cq_mem_size /
  1306. QEDI_PAGE_SIZE) * sizeof(void *);
  1307. qedi->global_queues[i]->cq_pbl_size =
  1308. (qedi->global_queues[i]->cq_pbl_size +
  1309. (QEDI_PAGE_SIZE - 1));
  1310. qedi->global_queues[i]->cq = dma_zalloc_coherent(&qedi->pdev->dev,
  1311. qedi->global_queues[i]->cq_mem_size,
  1312. &qedi->global_queues[i]->cq_dma,
  1313. GFP_KERNEL);
  1314. if (!qedi->global_queues[i]->cq) {
  1315. QEDI_WARN(&qedi->dbg_ctx,
  1316. "Could not allocate cq.\n");
  1317. status = -ENOMEM;
  1318. goto mem_alloc_failure;
  1319. }
  1320. qedi->global_queues[i]->cq_pbl = dma_zalloc_coherent(&qedi->pdev->dev,
  1321. qedi->global_queues[i]->cq_pbl_size,
  1322. &qedi->global_queues[i]->cq_pbl_dma,
  1323. GFP_KERNEL);
  1324. if (!qedi->global_queues[i]->cq_pbl) {
  1325. QEDI_WARN(&qedi->dbg_ctx,
  1326. "Could not allocate cq PBL.\n");
  1327. status = -ENOMEM;
  1328. goto mem_alloc_failure;
  1329. }
  1330. /* Create PBL */
  1331. num_pages = qedi->global_queues[i]->cq_mem_size /
  1332. QEDI_PAGE_SIZE;
  1333. page = qedi->global_queues[i]->cq_dma;
  1334. pbl = (u32 *)qedi->global_queues[i]->cq_pbl;
  1335. while (num_pages--) {
  1336. *pbl = (u32)page;
  1337. pbl++;
  1338. *pbl = (u32)((u64)page >> 32);
  1339. pbl++;
  1340. page += QEDI_PAGE_SIZE;
  1341. }
  1342. }
  1343. list = (u32 *)qedi->p_cpuq;
  1344. /*
  1345. * The list is built as follows: CQ#0 PBL pointer, RQ#0 PBL pointer,
  1346. * CQ#1 PBL pointer, RQ#1 PBL pointer, etc. Each PBL pointer points
  1347. * to the physical address which contains an array of pointers to the
  1348. * physical addresses of the specific queue pages.
  1349. */
  1350. for (i = 0; i < qedi->num_queues; i++) {
  1351. *list = (u32)qedi->global_queues[i]->cq_pbl_dma;
  1352. list++;
  1353. *list = (u32)((u64)qedi->global_queues[i]->cq_pbl_dma >> 32);
  1354. list++;
  1355. *list = (u32)0;
  1356. list++;
  1357. *list = (u32)((u64)0 >> 32);
  1358. list++;
  1359. }
  1360. return 0;
  1361. mem_alloc_failure:
  1362. qedi_free_global_queues(qedi);
  1363. return status;
  1364. }
/* Allocate an endpoint's send queue plus its page buffer list (PBL)
 * and fill the PBL with the SQ's page addresses as lo/hi 32-bit pairs.
 * Returns 0 on success, -EIO for a NULL ep, -ENOMEM on allocation
 * failure.
 */
int qedi_alloc_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
{
	int rval = 0;

	u32 *pbl;
	dma_addr_t page;
	int num_pages;

	if (!ep)
		return -EIO;

	/* Calculate appropriate queue and PBL sizes */
	ep->sq_mem_size = QEDI_SQ_SIZE * sizeof(struct iscsi_wqe);
	ep->sq_mem_size += QEDI_PAGE_SIZE - 1;

	ep->sq_pbl_size = (ep->sq_mem_size / QEDI_PAGE_SIZE) * sizeof(void *);
	/* NOTE(review): a full extra page of slack is added here rather
	 * than rounding up by (QEDI_PAGE_SIZE - 1) as done for sq_mem_size;
	 * presumably intentional headroom - confirm.
	 */
	ep->sq_pbl_size = ep->sq_pbl_size + QEDI_PAGE_SIZE;

	ep->sq = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
				     &ep->sq_dma, GFP_KERNEL);
	if (!ep->sq) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Could not allocate send queue.\n");
		rval = -ENOMEM;
		goto out;
	}
	ep->sq_pbl = dma_zalloc_coherent(&qedi->pdev->dev, ep->sq_pbl_size,
					 &ep->sq_pbl_dma, GFP_KERNEL);
	if (!ep->sq_pbl) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Could not allocate send queue PBL.\n");
		rval = -ENOMEM;
		goto out_free_sq;
	}

	/* Create PBL */
	num_pages = ep->sq_mem_size / QEDI_PAGE_SIZE;
	page = ep->sq_dma;
	pbl = (u32 *)ep->sq_pbl;

	while (num_pages--) {
		*pbl = (u32)page;	/* low 32 bits */
		pbl++;
		*pbl = (u32)((u64)page >> 32);	/* high 32 bits */
		pbl++;
		page += QEDI_PAGE_SIZE;
	}

	return rval;

out_free_sq:
	dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
			  ep->sq_dma);
out:
	return rval;
}
  1412. void qedi_free_sq(struct qedi_ctx *qedi, struct qedi_endpoint *ep)
  1413. {
  1414. if (ep->sq_pbl)
  1415. dma_free_coherent(&qedi->pdev->dev, ep->sq_pbl_size, ep->sq_pbl,
  1416. ep->sq_pbl_dma);
  1417. if (ep->sq)
  1418. dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
  1419. ep->sq_dma);
  1420. }
/* Claim a free FW task-context index.  find_first_zero_bit() is not
 * atomic with respect to test_and_set_bit(), so on a lost race the
 * lookup is simply retried.  Returns the claimed index, or -1 when the
 * pool is exhausted.
 */
int qedi_get_task_idx(struct qedi_ctx *qedi)
{
	s16 tmp_idx;

again:
	tmp_idx = find_first_zero_bit(qedi->task_idx_map,
				      MAX_ISCSI_TASK_ENTRIES);

	if (tmp_idx >= MAX_ISCSI_TASK_ENTRIES) {
		QEDI_ERR(&qedi->dbg_ctx, "FW task context pool is full.\n");
		tmp_idx = -1;
		goto err_idx;
	}

	/* Someone else grabbed the bit between lookup and set: retry. */
	if (test_and_set_bit(tmp_idx, qedi->task_idx_map))
		goto again;

err_idx:
	return tmp_idx;
}
  1437. void qedi_clear_task_idx(struct qedi_ctx *qedi, int idx)
  1438. {
  1439. if (!test_and_clear_bit(idx, qedi->task_idx_map))
  1440. QEDI_ERR(&qedi->dbg_ctx,
  1441. "FW task context, already cleared, tid=0x%x\n", idx);
  1442. }
  1443. void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
  1444. struct qedi_cmd *cmd)
  1445. {
  1446. qedi->itt_map[tid].itt = proto_itt;
  1447. qedi->itt_map[tid].p_cmd = cmd;
  1448. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
  1449. "update itt map tid=0x%x, with proto itt=0x%x\n", tid,
  1450. qedi->itt_map[tid].itt);
  1451. }
/* Reverse-map a protocol ITT to its FW task id by linearly scanning
 * itt_map.  On a miss, *tid is left untouched and WARN_ON fires.
 */
void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, s16 *tid)
{
	u16 i;

	for (i = 0; i < MAX_ISCSI_TASK_ENTRIES; i++) {
		if (qedi->itt_map[i].itt == itt) {
			*tid = i;
			QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
				  "Ref itt=0x%x, found at tid=0x%x\n",
				  itt, *tid);
			return;
		}
	}

	WARN_ON(1);
}
  1466. void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt)
  1467. {
  1468. *proto_itt = qedi->itt_map[tid].itt;
  1469. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
  1470. "Get itt map tid [0x%x with proto itt[0x%x]",
  1471. tid, *proto_itt);
  1472. }
  1473. struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid)
  1474. {
  1475. struct qedi_cmd *cmd = NULL;
  1476. if (tid >= MAX_ISCSI_TASK_ENTRIES)
  1477. return NULL;
  1478. cmd = qedi->itt_map[tid].p_cmd;
  1479. if (cmd->task_id != tid)
  1480. return NULL;
  1481. qedi->itt_map[tid].p_cmd = NULL;
  1482. return cmd;
  1483. }
  1484. static int qedi_alloc_itt(struct qedi_ctx *qedi)
  1485. {
  1486. qedi->itt_map = kcalloc(MAX_ISCSI_TASK_ENTRIES,
  1487. sizeof(struct qedi_itt_map), GFP_KERNEL);
  1488. if (!qedi->itt_map) {
  1489. QEDI_ERR(&qedi->dbg_ctx,
  1490. "Unable to allocate itt map array memory\n");
  1491. return -ENOMEM;
  1492. }
  1493. return 0;
  1494. }
  1495. static void qedi_free_itt(struct qedi_ctx *qedi)
  1496. {
  1497. kfree(qedi->itt_map);
  1498. }
/* LL2 (light L2) callbacks: receive path only, no tx completion hook. */
static struct qed_ll2_cb_ops qedi_ll2_cb_ops = {
	.rx_cb = qedi_ll2_rx,
	.tx_cb = NULL,
};
/* Per-CPU I/O kthread: splices the CPU's pending CQE work onto a local
 * list so the actual CQE processing runs without the work-list lock.
 */
static int qedi_percpu_io_thread(void *arg)
{
	struct qedi_percpu_s *p = arg;
	struct qedi_work *work, *tmp;
	unsigned long flags;
	LIST_HEAD(work_list);

	set_user_nice(current, -20);

	while (!kthread_should_stop()) {
		spin_lock_irqsave(&p->p_work_lock, flags);
		while (!list_empty(&p->work_list)) {
			list_splice_init(&p->work_list, &work_list);
			spin_unlock_irqrestore(&p->p_work_lock, flags);

			list_for_each_entry_safe(work, tmp, &work_list, list) {
				list_del_init(&work->list);
				qedi_fp_process_cqes(work);
				/* Only unsolicited items were kmalloc'ed by
				 * qedi_queue_cqe(); solicited ones live
				 * inside the qedi_cmd and must not be freed.
				 */
				if (!work->is_solicited)
					kfree(work);
			}
			cond_resched();
			spin_lock_irqsave(&p->p_work_lock, flags);
		}
		/* Announce the sleep while still holding the lock so a
		 * producer's wake_up_process() cannot be missed.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		spin_unlock_irqrestore(&p->p_work_lock, flags);
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}
/* CPU hotplug "online" callback: create, bind and start this CPU's
 * I/O thread that drains its CQE work list.
 */
static int qedi_cpu_online(unsigned int cpu)
{
	struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
	struct task_struct *thread;

	thread = kthread_create_on_node(qedi_percpu_io_thread, (void *)p,
					cpu_to_node(cpu),
					"qedi_thread/%d", cpu);
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	kthread_bind(thread, cpu);
	p->iothread = thread;
	wake_up_process(thread);
	return 0;
}
/* CPU hotplug "offline" callback: detach the CPU's io thread, drain any
 * work still on its list inline, then stop the thread.
 */
static int qedi_cpu_offline(unsigned int cpu)
{
	struct qedi_percpu_s *p = this_cpu_ptr(&qedi_percpu);
	struct qedi_work *work, *tmp;
	struct task_struct *thread;

	spin_lock_bh(&p->p_work_lock);
	thread = p->iothread;
	p->iothread = NULL;

	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
		list_del_init(&work->list);
		qedi_fp_process_cqes(work);
		/* Solicited work items are embedded in the qedi_cmd. */
		if (!work->is_solicited)
			kfree(work);
	}

	spin_unlock_bh(&p->p_work_lock);
	if (thread)
		kthread_stop(thread);
	return 0;
}
/*
 * qedi_reset_host_mtu - restart the LL2 channel with a new MTU
 * @qedi: driver context
 * @mtu:  new link MTU
 *
 * Recovers all offloaded connections, stops LL2 and frees its queued skbs,
 * then restarts LL2 with @mtu plus IPv6/TCP header room.
 */
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu)
{
        struct qed_ll2_params params;

        qedi_recover_all_conns(qedi);

        qedi_ops->ll2->stop(qedi->cdev);
        qedi_ll2_free_skbs(qedi);

        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, "old MTU %u, new MTU %u\n",
                  qedi->ll2_mtu, mtu);
        memset(&params, 0, sizeof(params));
        qedi->ll2_mtu = mtu;
        params.mtu = qedi->ll2_mtu + IPV6_HDR_LEN + TCP_HDR_LEN;
        params.drop_ttl0_packets = 0;
        params.rx_vlan_stripping = 1;
        ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);
        qedi_ops->ll2->start(qedi->cdev, &params);
}
  1580. /**
  1581. * qedi_get_nvram_block: - Scan through the iSCSI NVRAM block (while accounting
  1582. * for gaps) for the matching absolute-pf-id of the QEDI device.
  1583. */
  1584. static struct nvm_iscsi_block *
  1585. qedi_get_nvram_block(struct qedi_ctx *qedi)
  1586. {
  1587. int i;
  1588. u8 pf;
  1589. u32 flags;
  1590. struct nvm_iscsi_block *block;
  1591. pf = qedi->dev_info.common.abs_pf_id;
  1592. block = &qedi->iscsi_image->iscsi_cfg.block[0];
  1593. for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) {
  1594. flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >>
  1595. NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET;
  1596. if (flags & (NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY |
  1597. NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED) &&
  1598. (pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK)
  1599. >> NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET))
  1600. return block;
  1601. }
  1602. return NULL;
  1603. }
/* Sysfs "show" for iscsi_boot ethernet attributes (IP, netmask, gateway,
 * MAC, VLAN, ...) sourced from the NVM boot block.
 */
static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
{
        struct qedi_ctx *qedi = data;
        struct nvm_iscsi_initiator *initiator;
        int rc = 1;
        u32 ipv6_en, dhcp_en, ip_len;
        struct nvm_iscsi_block *block;
        char *fmt, *ip, *sub, *gw;

        block = qedi_get_nvram_block(qedi);
        if (!block)
                return 0;

        initiator = &block->initiator;
        ipv6_en = block->generic.ctrl_flags &
                  NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
        dhcp_en = block->generic.ctrl_flags &
                  NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED;
        /* Static IP assignments. */
        fmt = ipv6_en ? "%pI6\n" : "%pI4\n";
        ip = ipv6_en ? initiator->ipv6.addr.byte : initiator->ipv4.addr.byte;
        ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
        sub = ipv6_en ? initiator->ipv6.subnet_mask.byte :
              initiator->ipv4.subnet_mask.byte;
        gw = ipv6_en ? initiator->ipv6.gateway.byte :
             initiator->ipv4.gateway.byte;

        /* DHCP IP adjustments: report placeholder addresses instead of the
         * static fields when the TCP/IP config comes from DHCP.
         */
        fmt = dhcp_en ? "%s\n" : fmt;
        if (dhcp_en) {
                ip = ipv6_en ? "0::0" : "0.0.0.0";
                sub = ip;
                gw = ip;
                /* strlen("0::0") + 1 or strlen("0.0.0.0") + 1 */
                ip_len = ipv6_en ? 5 : 8;
        }

        switch (type) {
        case ISCSI_BOOT_ETH_IP_ADDR:
                rc = snprintf(buf, ip_len, fmt, ip);
                break;
        case ISCSI_BOOT_ETH_SUBNET_MASK:
                rc = snprintf(buf, ip_len, fmt, sub);
                break;
        case ISCSI_BOOT_ETH_GATEWAY:
                rc = snprintf(buf, ip_len, fmt, gw);
                break;
        case ISCSI_BOOT_ETH_FLAGS:
                rc = snprintf(buf, 3, "%hhd\n",
                              SYSFS_FLAG_FW_SEL_BOOT);
                break;
        case ISCSI_BOOT_ETH_INDEX:
                rc = snprintf(buf, 3, "0\n");
                break;
        case ISCSI_BOOT_ETH_MAC:
                rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN);
                break;
        case ISCSI_BOOT_ETH_VLAN:
                rc = snprintf(buf, 12, "%d\n",
                              GET_FIELD2(initiator->generic_cont0,
                                         NVM_ISCSI_CFG_INITIATOR_VLAN));
                break;
        case ISCSI_BOOT_ETH_ORIGIN:
                /* "3" presumably maps to the iBFT origin encoding for DHCP;
                 * NOTE(review): nothing is written when !dhcp_en and rc stays
                 * at its initial value of 1 -- confirm this is intended.
                 */
                if (dhcp_en)
                        rc = snprintf(buf, 3, "3\n");
                break;
        default:
                rc = 0;
                break;
        }
        return rc;
}
  1671. static umode_t qedi_eth_get_attr_visibility(void *data, int type)
  1672. {
  1673. int rc = 1;
  1674. switch (type) {
  1675. case ISCSI_BOOT_ETH_FLAGS:
  1676. case ISCSI_BOOT_ETH_MAC:
  1677. case ISCSI_BOOT_ETH_INDEX:
  1678. case ISCSI_BOOT_ETH_IP_ADDR:
  1679. case ISCSI_BOOT_ETH_SUBNET_MASK:
  1680. case ISCSI_BOOT_ETH_GATEWAY:
  1681. case ISCSI_BOOT_ETH_ORIGIN:
  1682. case ISCSI_BOOT_ETH_VLAN:
  1683. rc = 0444;
  1684. break;
  1685. default:
  1686. rc = 0;
  1687. break;
  1688. }
  1689. return rc;
  1690. }
  1691. static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
  1692. {
  1693. struct qedi_ctx *qedi = data;
  1694. struct nvm_iscsi_initiator *initiator;
  1695. int rc;
  1696. struct nvm_iscsi_block *block;
  1697. block = qedi_get_nvram_block(qedi);
  1698. if (!block)
  1699. return 0;
  1700. initiator = &block->initiator;
  1701. switch (type) {
  1702. case ISCSI_BOOT_INI_INITIATOR_NAME:
  1703. rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
  1704. initiator->initiator_name.byte);
  1705. break;
  1706. default:
  1707. rc = 0;
  1708. break;
  1709. }
  1710. return rc;
  1711. }
  1712. static umode_t qedi_ini_get_attr_visibility(void *data, int type)
  1713. {
  1714. int rc;
  1715. switch (type) {
  1716. case ISCSI_BOOT_INI_INITIATOR_NAME:
  1717. rc = 0444;
  1718. break;
  1719. default:
  1720. rc = 0;
  1721. break;
  1722. }
  1723. return rc;
  1724. }
/*
 * Sysfs "show" helper for one boot target's attributes.
 * @idx: QEDI_NVM_TGT_PRI or QEDI_NVM_TGT_SEC, selecting the NVM target slot.
 *
 * Returns the number of bytes written to @buf, 0 for unhandled types, or the
 * initial rc of 1 when no NVM block is found or the target is disabled.
 */
static ssize_t
qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
                        char *buf, enum qedi_nvm_tgts idx)
{
        int rc = 1;
        u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
        struct nvm_iscsi_block *block;
        char *chap_name, *chap_secret;
        char *mchap_name, *mchap_secret;

        block = qedi_get_nvram_block(qedi);
        if (!block)
                goto exit_show_tgt_info;

        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
                  "Port:%d, tgt_idx:%d\n",
                  GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx);

        ctrl_flags = block->target[idx].ctrl_flags &
                     NVM_ISCSI_CFG_TARGET_ENABLED;

        if (!ctrl_flags) {
                QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT,
                          "Target disabled\n");
                goto exit_show_tgt_info;
        }

        ipv6_en = block->generic.ctrl_flags &
                  NVM_ISCSI_CFG_GEN_IPV6_ENABLED;
        ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN;
        /* Forward CHAP credentials live in the initiator block; mutual
         * (reverse) CHAP credentials live in the target entry.  Each pointer
         * is NULL when the corresponding feature is disabled.
         */
        chap_en = block->generic.ctrl_flags &
                  NVM_ISCSI_CFG_GEN_CHAP_ENABLED;
        chap_name = chap_en ? block->initiator.chap_name.byte : NULL;
        chap_secret = chap_en ? block->initiator.chap_password.byte : NULL;

        mchap_en = block->generic.ctrl_flags &
                   NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED;
        mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL;
        mchap_secret = mchap_en ? block->target[idx].chap_password.byte : NULL;

        switch (type) {
        case ISCSI_BOOT_TGT_NAME:
                rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
                             block->target[idx].target_name.byte);
                break;
        case ISCSI_BOOT_TGT_IP_ADDR:
                if (ipv6_en)
                        rc = snprintf(buf, ip_len, "%pI6\n",
                                      block->target[idx].ipv6_addr.byte);
                else
                        rc = snprintf(buf, ip_len, "%pI4\n",
                                      block->target[idx].ipv4_addr.byte);
                break;
        case ISCSI_BOOT_TGT_PORT:
                rc = snprintf(buf, 12, "%d\n",
                              GET_FIELD2(block->target[idx].generic_cont0,
                                         NVM_ISCSI_CFG_TARGET_TCP_PORT));
                break;
        case ISCSI_BOOT_TGT_LUN:
                /* lun.value[1] is used as the printed field width for
                 * lun.value[0]; NOTE(review): confirm against the NVM LUN
                 * encoding.
                 */
                rc = snprintf(buf, 22, "%.*d\n",
                              block->target[idx].lun.value[1],
                              block->target[idx].lun.value[0]);
                break;
        case ISCSI_BOOT_TGT_CHAP_NAME:
                rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
                             chap_name);
                break;
        case ISCSI_BOOT_TGT_CHAP_SECRET:
                rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
                             chap_secret);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_NAME:
                rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
                             mchap_name);
                break;
        case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
                rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
                             mchap_secret);
                break;
        case ISCSI_BOOT_TGT_FLAGS:
                rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
                break;
        case ISCSI_BOOT_TGT_NIC_ASSOC:
                rc = snprintf(buf, 3, "0\n");
                break;
        default:
                rc = 0;
                break;
        }

exit_show_tgt_info:
        return rc;
}
  1810. static ssize_t qedi_show_boot_tgt_pri_info(void *data, int type, char *buf)
  1811. {
  1812. struct qedi_ctx *qedi = data;
  1813. return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_PRI);
  1814. }
  1815. static ssize_t qedi_show_boot_tgt_sec_info(void *data, int type, char *buf)
  1816. {
  1817. struct qedi_ctx *qedi = data;
  1818. return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_SEC);
  1819. }
  1820. static umode_t qedi_tgt_get_attr_visibility(void *data, int type)
  1821. {
  1822. int rc;
  1823. switch (type) {
  1824. case ISCSI_BOOT_TGT_NAME:
  1825. case ISCSI_BOOT_TGT_IP_ADDR:
  1826. case ISCSI_BOOT_TGT_PORT:
  1827. case ISCSI_BOOT_TGT_LUN:
  1828. case ISCSI_BOOT_TGT_CHAP_NAME:
  1829. case ISCSI_BOOT_TGT_CHAP_SECRET:
  1830. case ISCSI_BOOT_TGT_REV_CHAP_NAME:
  1831. case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
  1832. case ISCSI_BOOT_TGT_NIC_ASSOC:
  1833. case ISCSI_BOOT_TGT_FLAGS:
  1834. rc = 0444;
  1835. break;
  1836. default:
  1837. rc = 0;
  1838. break;
  1839. }
  1840. return rc;
  1841. }
  1842. static void qedi_boot_release(void *data)
  1843. {
  1844. struct qedi_ctx *qedi = data;
  1845. scsi_host_put(qedi->shost);
  1846. }
  1847. static int qedi_get_boot_info(struct qedi_ctx *qedi)
  1848. {
  1849. int ret = 1;
  1850. struct qedi_nvm_iscsi_image nvm_image;
  1851. QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
  1852. "Get NVM iSCSI CFG image\n");
  1853. ret = qedi_ops->common->nvm_get_image(qedi->cdev,
  1854. QED_NVM_IMAGE_ISCSI_CFG,
  1855. (char *)qedi->iscsi_image,
  1856. sizeof(nvm_image));
  1857. if (ret)
  1858. QEDI_ERR(&qedi->dbg_ctx,
  1859. "Could not get NVM image. ret = %d\n", ret);
  1860. return ret;
  1861. }
/*
 * Create the iscsi_boot sysfs kset and kobjects (primary/secondary target,
 * initiator, ethernet) that export the NVM boot configuration.
 *
 * Each kobject created here holds a Scsi_Host reference (taken via
 * scsi_host_get() just before creation) which is dropped by
 * qedi_boot_release() when the kobject is released; on any failure,
 * iscsi_boot_destroy_kset() tears down whatever was created so far.
 */
static int qedi_setup_boot_info(struct qedi_ctx *qedi)
{
        struct iscsi_boot_kobj *boot_kobj;

        if (qedi_get_boot_info(qedi))
                return -EPERM;

        qedi->boot_kset = iscsi_boot_create_host_kset(qedi->shost->host_no);
        if (!qedi->boot_kset)
                goto kset_free;

        if (!scsi_host_get(qedi->shost))
                goto kset_free;

        boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi,
                                             qedi_show_boot_tgt_pri_info,
                                             qedi_tgt_get_attr_visibility,
                                             qedi_boot_release);
        if (!boot_kobj)
                goto put_host;

        if (!scsi_host_get(qedi->shost))
                goto kset_free;

        boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 1, qedi,
                                             qedi_show_boot_tgt_sec_info,
                                             qedi_tgt_get_attr_visibility,
                                             qedi_boot_release);
        if (!boot_kobj)
                goto put_host;

        if (!scsi_host_get(qedi->shost))
                goto kset_free;

        boot_kobj = iscsi_boot_create_initiator(qedi->boot_kset, 0, qedi,
                                                qedi_show_boot_ini_info,
                                                qedi_ini_get_attr_visibility,
                                                qedi_boot_release);
        if (!boot_kobj)
                goto put_host;

        if (!scsi_host_get(qedi->shost))
                goto kset_free;

        boot_kobj = iscsi_boot_create_ethernet(qedi->boot_kset, 0, qedi,
                                               qedi_show_boot_eth_info,
                                               qedi_eth_get_attr_visibility,
                                               qedi_boot_release);
        if (!boot_kobj)
                goto put_host;

        return 0;

put_host:
        /* Balance the scsi_host_get() whose kobject creation failed. */
        scsi_host_put(qedi->shost);
kset_free:
        /* NOTE(review): reached with qedi->boot_kset == NULL when the kset
         * itself failed to allocate -- relies on iscsi_boot_destroy_kset()
         * tolerating NULL; confirm for the targeted kernel version.
         */
        iscsi_boot_destroy_kset(qedi->boot_kset);
        return -ENOMEM;
}
/*
 * Common device teardown for PCI removal and recovery.
 * @mode: QEDI_MODE_NORMAL frees everything (host, cid queue, uio, itt, ...);
 *        other modes keep resources that survive a recovery cycle.
 *
 * Steps that touch the hardware are skipped when QEDI_IN_OFFLINE is set.
 */
static void __qedi_remove(struct pci_dev *pdev, int mode)
{
        struct qedi_ctx *qedi = pci_get_drvdata(pdev);
        int rval;

        /* Drain and destroy the workqueues first so no deferred work runs
         * while the device is being torn down.
         */
        if (qedi->tmf_thread) {
                flush_workqueue(qedi->tmf_thread);
                destroy_workqueue(qedi->tmf_thread);
                qedi->tmf_thread = NULL;
        }

        if (qedi->offload_thread) {
                flush_workqueue(qedi->offload_thread);
                destroy_workqueue(qedi->offload_thread);
                qedi->offload_thread = NULL;
        }

#ifdef CONFIG_DEBUG_FS
        qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
        if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags))
                qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);

        qedi_sync_free_irqs(qedi);

        if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
                qedi_ops->stop(qedi->cdev);
                qedi_ops->ll2->stop(qedi->cdev);
        }

        if (mode == QEDI_MODE_NORMAL)
                qedi_free_iscsi_pf_param(qedi);

        /* Tell management firmware the driver is going away. */
        rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
        if (rval)
                QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n");

        if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
                qedi_ops->common->slowpath_stop(qedi->cdev);
                qedi_ops->common->remove(qedi->cdev);
        }

        qedi_destroy_fp(qedi);

        if (mode == QEDI_MODE_NORMAL) {
                qedi_release_cid_que(qedi);
                qedi_cm_free_mem(qedi);
                qedi_free_uio(qedi->udev);
                qedi_free_itt(qedi);

                iscsi_host_remove(qedi->shost);
                iscsi_host_free(qedi->shost);

                if (qedi->ll2_recv_thread) {
                        kthread_stop(qedi->ll2_recv_thread);
                        qedi->ll2_recv_thread = NULL;
                }
                qedi_ll2_free_skbs(qedi);

                if (qedi->boot_kset)
                        iscsi_boot_destroy_kset(qedi->boot_kset);
        }
}
/*
 * Common device bring-up for PCI probe and recovery.
 * @mode: QEDI_MODE_NORMAL for a fresh probe (allocates the host and all
 *        per-device resources); QEDI_MODE_RECOVERY re-initializes the
 *        hardware paths on an already-allocated context.
 *
 * Sequence: qed probe -> pf params -> fastpath -> slowpath -> interrupts ->
 * BDQ doorbells -> LL2 -> iSCSI function start -> link -> (normal mode only)
 * host registration, uio, cid/itt/cm memory, workqueues, boot sysfs.
 */
static int __qedi_probe(struct pci_dev *pdev, int mode)
{
        struct qedi_ctx *qedi;
        struct qed_ll2_params params;
        u32 dp_module = 0;
        u8 dp_level = 0;
        bool is_vf = false;
        char host_buf[16];
        struct qed_link_params link_params;
        struct qed_slowpath_params sp_params;
        struct qed_probe_params qed_params;
        void *task_start, *task_end;
        int rc;
        u16 tmp;

        if (mode != QEDI_MODE_RECOVERY) {
                qedi = qedi_host_alloc(pdev);
                if (!qedi) {
                        rc = -ENOMEM;
                        goto exit_probe;
                }
        } else {
                qedi = pci_get_drvdata(pdev);
        }

        memset(&qed_params, 0, sizeof(qed_params));
        qed_params.protocol = QED_PROTOCOL_ISCSI;
        qed_params.dp_module = dp_module;
        qed_params.dp_level = dp_level;
        qed_params.is_vf = is_vf;
        qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
        if (!qedi->cdev) {
                rc = -ENODEV;
                QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
                goto free_host;
        }

        atomic_set(&qedi->link_state, QEDI_LINK_DOWN);

        rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
        if (rc)
                goto free_host;

        if (mode != QEDI_MODE_RECOVERY) {
                rc = qedi_set_iscsi_pf_param(qedi);
                if (rc) {
                        rc = -ENOMEM;
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Set iSCSI pf param fail\n");
                        goto free_host;
                }
        }

        qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);

        rc = qedi_prepare_fp(qedi);
        if (rc) {
                QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath.\n");
                goto free_pf_params;
        }

        /* Start the Slowpath-process */
        memset(&sp_params, 0, sizeof(struct qed_slowpath_params));
        sp_params.int_mode = QED_INT_MODE_MSIX;
        sp_params.drv_major = QEDI_DRIVER_MAJOR_VER;
        sp_params.drv_minor = QEDI_DRIVER_MINOR_VER;
        sp_params.drv_rev = QEDI_DRIVER_REV_VER;
        sp_params.drv_eng = QEDI_DRIVER_ENG_VER;
        strlcpy(sp_params.name, "qedi iSCSI", QED_DRV_VER_STR_SIZE);
        rc = qedi_ops->common->slowpath_start(qedi->cdev, &sp_params);
        if (rc) {
                QEDI_ERR(&qedi->dbg_ctx, "Cannot start slowpath\n");
                goto stop_hw;
        }

        /* update_pf_params needs to be called before and after slowpath
         * start
         */
        qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);

        rc = qedi_setup_int(qedi);
        if (rc)
                goto stop_iscsi_func;

        qedi_ops->common->set_power_state(qedi->cdev, PCI_D0);

        /* Learn information crucial for qedi to progress */
        rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
        if (rc)
                goto stop_iscsi_func;

        /* Record BDQ producer doorbell addresses */
        qedi->bdq_primary_prod = qedi->dev_info.primary_dbq_rq_addr;
        qedi->bdq_secondary_prod = qedi->dev_info.secondary_bdq_rq_addr;
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
                  "BDQ primary_prod=%p secondary_prod=%p.\n",
                  qedi->bdq_primary_prod,
                  qedi->bdq_secondary_prod);

        /*
         * We need to write the number of BDs in the BDQ we've preallocated so
         * the f/w will do a prefetch and we'll get an unsolicited CQE when a
         * packet arrives.
         */
        qedi->bdq_prod_idx = QEDI_BDQ_NUM;
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
                  "Writing %d to primary and secondary BDQ doorbell registers.\n",
                  qedi->bdq_prod_idx);
        writew(qedi->bdq_prod_idx, qedi->bdq_primary_prod);
        /* Read back (into an otherwise-unused tmp) to flush the posted
         * doorbell writes.
         */
        tmp = readw(qedi->bdq_primary_prod);
        writew(qedi->bdq_prod_idx, qedi->bdq_secondary_prod);
        tmp = readw(qedi->bdq_secondary_prod);

        ether_addr_copy(qedi->mac, qedi->dev_info.common.hw_mac);
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
                  qedi->mac);

        sprintf(host_buf, "host_%d", qedi->shost->host_no);
        qedi_ops->common->set_name(qedi->cdev, host_buf);

        qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);

        memset(&params, 0, sizeof(params));
        params.mtu = DEF_PATH_MTU + IPV6_HDR_LEN + TCP_HDR_LEN;
        qedi->ll2_mtu = DEF_PATH_MTU;
        params.drop_ttl0_packets = 0;
        params.rx_vlan_stripping = 1;
        ether_addr_copy(params.ll2_mac_address, qedi->dev_info.common.hw_mac);

        if (mode != QEDI_MODE_RECOVERY) {
                /* set up rx path */
                INIT_LIST_HEAD(&qedi->ll2_skb_list);
                spin_lock_init(&qedi->ll2_lock);
                /* start qedi context */
                spin_lock_init(&qedi->hba_lock);
                spin_lock_init(&qedi->task_idx_lock);
                mutex_init(&qedi->stats_lock);
        }
        qedi_ops->ll2->register_cb_ops(qedi->cdev, &qedi_ll2_cb_ops, qedi);
        qedi_ops->ll2->start(qedi->cdev, &params);

        if (mode != QEDI_MODE_RECOVERY) {
                /* NOTE(review): kthread_run() can return ERR_PTR() and the
                 * result is stored unchecked -- confirm users of
                 * ll2_recv_thread tolerate an error value.
                 */
                qedi->ll2_recv_thread = kthread_run(qedi_ll2_recv_thread,
                                                    (void *)qedi,
                                                    "qedi_ll2_thread");
        }

        rc = qedi_ops->start(qedi->cdev, &qedi->tasks,
                             qedi, qedi_iscsi_event_cb);
        if (rc) {
                rc = -ENODEV;
                QEDI_ERR(&qedi->dbg_ctx, "Cannot start iSCSI function\n");
                goto stop_slowpath;
        }

        task_start = qedi_get_task_mem(&qedi->tasks, 0);
        task_end = qedi_get_task_mem(&qedi->tasks, MAX_TID_BLOCKS_ISCSI - 1);
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC,
                  "Task context start=%p, end=%p block_size=%u.\n",
                  task_start, task_end, qedi->tasks.size);

        memset(&link_params, 0, sizeof(link_params));
        link_params.link_up = true;
        rc = qedi_ops->common->set_link(qedi->cdev, &link_params);
        if (rc) {
                QEDI_WARN(&qedi->dbg_ctx, "Link set up failed.\n");
                atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
        }

#ifdef CONFIG_DEBUG_FS
        qedi_dbg_host_init(&qedi->dbg_ctx, qedi_debugfs_ops,
                           qedi_dbg_fops);
#endif
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                  "QLogic FastLinQ iSCSI Module qedi %s, FW %d.%d.%d.%d\n",
                  QEDI_MODULE_VERSION, FW_MAJOR_VERSION, FW_MINOR_VERSION,
                  FW_REVISION_VERSION, FW_ENGINEERING_VERSION);

        if (mode == QEDI_MODE_NORMAL) {
                if (iscsi_host_add(qedi->shost, &pdev->dev)) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Could not add iscsi host\n");
                        rc = -ENOMEM;
                        goto remove_host;
                }

                /* Allocate uio buffers */
                rc = qedi_alloc_uio_rings(qedi);
                if (rc) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "UIO alloc ring failed err=%d\n", rc);
                        goto remove_host;
                }

                rc = qedi_init_uio(qedi);
                if (rc) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "UIO init failed, err=%d\n", rc);
                        goto free_uio;
                }

                /* host the array on iscsi_conn */
                rc = qedi_setup_cid_que(qedi);
                if (rc) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Could not setup cid que\n");
                        goto free_uio;
                }

                rc = qedi_cm_alloc_mem(qedi);
                if (rc) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Could not alloc cm memory\n");
                        goto free_cid_que;
                }

                rc = qedi_alloc_itt(qedi);
                if (rc) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Could not alloc itt memory\n");
                        goto free_cid_que;
                }

                sprintf(host_buf, "host_%d", qedi->shost->host_no);
                qedi->tmf_thread = create_singlethread_workqueue(host_buf);
                if (!qedi->tmf_thread) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Unable to start tmf thread!\n");
                        rc = -ENODEV;
                        goto free_cid_que;
                }

                /* NOTE(review): "qedi_ofld%d" plus a very large host_no could
                 * exceed host_buf[16]; snprintf(host_buf, sizeof(host_buf),
                 * ...) would be safer.
                 */
                sprintf(host_buf, "qedi_ofld%d", qedi->shost->host_no);
                qedi->offload_thread = create_workqueue(host_buf);
                if (!qedi->offload_thread) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Unable to start offload thread!\n");
                        rc = -ENODEV;
                        goto free_cid_que;
                }

                /* F/w needs 1st task context memory entry for performance */
                set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
                atomic_set(&qedi->num_offloads, 0);

                if (qedi_setup_boot_info(qedi))
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "No iSCSI boot target configured\n");

                rc = qedi_ops->common->update_drv_state(qedi->cdev, true);
                if (rc)
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Failed to send drv state to MFW\n");
        }

        return 0;

free_cid_que:
        qedi_release_cid_que(qedi);
free_uio:
        qedi_free_uio(qedi->udev);
remove_host:
#ifdef CONFIG_DEBUG_FS
        qedi_dbg_host_exit(&qedi->dbg_ctx);
#endif
        iscsi_host_remove(qedi->shost);
stop_iscsi_func:
        qedi_ops->stop(qedi->cdev);
stop_slowpath:
        qedi_ops->common->slowpath_stop(qedi->cdev);
stop_hw:
        qedi_ops->common->remove(qedi->cdev);
free_pf_params:
        qedi_free_iscsi_pf_param(qedi);
free_host:
        iscsi_host_free(qedi->shost);
exit_probe:
        return rc;
}
/* PCI probe entry point: full (non-recovery) initialization. */
static int qedi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        return __qedi_probe(pdev, QEDI_MODE_NORMAL);
}
/* PCI remove entry point: full (non-recovery) teardown. */
static void qedi_remove(struct pci_dev *pdev)
{
        __qedi_remove(pdev, QEDI_MODE_NORMAL);
}
/* PCI device IDs claimed by this driver (QLogic FastLinQ iSCSI PFs). */
static struct pci_device_id qedi_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x165E) },
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, 0x8084) },
        { 0 },
};
MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
/* Dynamic hotplug state id returned by cpuhp_setup_state() in qedi_init();
 * needed later by cpuhp_remove_state() on the cleanup paths.
 */
static enum cpuhp_state qedi_cpuhp_state;

static struct pci_driver qedi_pci_driver = {
        .name = QEDI_MODULE_NAME,
        .id_table = qedi_pci_tbl,
        .probe = qedi_probe,
        .remove = qedi_remove,
};
/*
 * Module init: acquire qed iSCSI ops, register the iSCSI transport, set up
 * per-CPU work lists and CPU hotplug callbacks, then register the PCI
 * driver.  Error paths unwind in strict reverse order.
 */
static int __init qedi_init(void)
{
        struct qedi_percpu_s *p;
        int cpu, rc = 0;

        qedi_ops = qed_get_iscsi_ops();
        if (!qedi_ops) {
                QEDI_ERR(NULL, "Failed to get qed iSCSI operations\n");
                return -EINVAL;
        }

#ifdef CONFIG_DEBUG_FS
        qedi_dbg_init("qedi");
#endif

        qedi_scsi_transport = iscsi_register_transport(&qedi_iscsi_transport);
        if (!qedi_scsi_transport) {
                QEDI_ERR(NULL, "Could not register qedi transport");
                rc = -ENOMEM;
                goto exit_qedi_init_1;
        }

        /* Initialize every possible CPU's work list before the hotplug
         * callbacks (which start the per-CPU I/O threads) are installed.
         */
        for_each_possible_cpu(cpu) {
                p = &per_cpu(qedi_percpu, cpu);
                INIT_LIST_HEAD(&p->work_list);
                spin_lock_init(&p->p_work_lock);
                p->iothread = NULL;
        }

        rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/qedi:online",
                               qedi_cpu_online, qedi_cpu_offline);
        if (rc < 0)
                goto exit_qedi_init_2;
        /* Positive return is the dynamically allocated state id. */
        qedi_cpuhp_state = rc;

        rc = pci_register_driver(&qedi_pci_driver);
        if (rc) {
                QEDI_ERR(NULL, "Failed to register driver\n");
                goto exit_qedi_hp;
        }

        return 0;

exit_qedi_hp:
        cpuhp_remove_state(qedi_cpuhp_state);
exit_qedi_init_2:
        iscsi_unregister_transport(&qedi_iscsi_transport);
exit_qedi_init_1:
#ifdef CONFIG_DEBUG_FS
        qedi_dbg_exit();
#endif
        qed_put_iscsi_ops();
        return rc;
}
/* Module exit: unwind qedi_init() in reverse order. */
static void __exit qedi_cleanup(void)
{
        pci_unregister_driver(&qedi_pci_driver);
        cpuhp_remove_state(qedi_cpuhp_state);
        iscsi_unregister_transport(&qedi_iscsi_transport);

#ifdef CONFIG_DEBUG_FS
        qedi_dbg_exit();
#endif
        qed_put_iscsi_ops();
}
/* Module metadata and entry points. */
MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx iSCSI Module");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("QLogic Corporation");
MODULE_VERSION(QEDI_MODULE_VERSION);
module_init(qedi_init);
module_exit(qedi_cleanup);