/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
  19. #include <linux/kernel.h>
  20. #include <linux/errno.h>
  21. #include <linux/types.h>
  22. #include <linux/pci.h>
  23. #include <linux/delay.h>
  24. #include <linux/if_ether.h>
  25. #include "vnic_resource.h"
  26. #include "vnic_devcmd.h"
  27. #include "vnic_dev.h"
  28. #include "vnic_wq.h"
  29. #include "vnic_stats.h"
  30. #include "enic.h"
  31. #define VNIC_MAX_RES_HDR_SIZE \
  32. (sizeof(struct vnic_resource_header) + \
  33. sizeof(struct vnic_resource) * RES_TYPE_MAX)
  34. #define VNIC_RES_STRIDE 128
/* Return the caller-private pointer stored in the vnic device. */
void *vnic_dev_priv(struct vnic_dev *vdev)
{
	return vdev->priv;
}
/* Walk the resource directory at the start of BAR0 and record, for each
 * recognized resource type, its count, mapped virtual address and bus
 * address in vdev->res[].
 *
 * The BAR may begin with either the normal vNIC resource header or a
 * mgmt-vNIC header; both magics are probed.  Entries referencing BARs we
 * did not map, or unknown types, are skipped.  Returns 0 on success or
 * -EINVAL on a malformed/out-of-bounds resource table.
 */
static int vnic_dev_discover_res(struct vnic_dev *vdev,
	struct vnic_dev_bar *bar, unsigned int num_bars)
{
	struct vnic_resource_header __iomem *rh;
	struct mgmt_barmap_hdr __iomem *mrh;
	struct vnic_resource __iomem *r;
	u8 type;

	if (num_bars == 0)
		return -EINVAL;

	if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
		vdev_err(vdev, "vNIC BAR0 res hdr length error\n");
		return -EINVAL;
	}

	rh = bar->vaddr;
	mrh = bar->vaddr;
	if (!rh) {
		vdev_err(vdev, "vNIC BAR0 res hdr not mem-mapped\n");
		return -EINVAL;
	}

	/* Check for mgmt vnic in addition to normal vnic */
	if ((ioread32(&rh->magic) != VNIC_RES_MAGIC) ||
	    (ioread32(&rh->version) != VNIC_RES_VERSION)) {
		if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) ||
		    (ioread32(&mrh->version) != MGMTVNIC_VERSION)) {
			vdev_err(vdev, "vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n",
				 VNIC_RES_MAGIC, VNIC_RES_VERSION,
				 MGMTVNIC_MAGIC, MGMTVNIC_VERSION,
				 ioread32(&rh->magic), ioread32(&rh->version));
			return -EINVAL;
		}
	}

	/* The resource entries start right after whichever header matched */
	if (ioread32(&mrh->magic) == MGMTVNIC_MAGIC)
		r = (struct vnic_resource __iomem *)(mrh + 1);
	else
		r = (struct vnic_resource __iomem *)(rh + 1);

	while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {

		u8 bar_num = ioread8(&r->bar);
		u32 bar_offset = ioread32(&r->bar_offset);
		u32 count = ioread32(&r->count);
		u32 len;

		r++;

		if (bar_num >= num_bars)
			continue;

		if (!bar[bar_num].len || !bar[bar_num].vaddr)
			continue;

		switch (type) {
		case RES_TYPE_WQ:
		case RES_TYPE_RQ:
		case RES_TYPE_CQ:
		case RES_TYPE_INTR_CTRL:
			/* each count is stride bytes long */
			len = count * VNIC_RES_STRIDE;
			/* reject entries pointing past the mapped BAR */
			if (len + bar_offset > bar[bar_num].len) {
				vdev_err(vdev, "vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n",
					 type, bar_offset, len,
					 bar[bar_num].len);
				return -EINVAL;
			}
			break;
		case RES_TYPE_INTR_PBA_LEGACY:
		case RES_TYPE_DEVCMD:
		case RES_TYPE_DEVCMD2:
			len = count;
			break;
		default:
			continue;
		}

		vdev->res[type].count = count;
		vdev->res[type].vaddr = (char __iomem *)bar[bar_num].vaddr +
					bar_offset;
		vdev->res[type].bus_addr = bar[bar_num].bus_addr + bar_offset;
	}

	return 0;
}
/* Return how many instances of resource @type were discovered in the BARs. */
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
	enum vnic_res_type type)
{
	return vdev->res[type].count;
}
EXPORT_SYMBOL(vnic_dev_get_res_count);
  119. void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
  120. unsigned int index)
  121. {
  122. if (!vdev->res[type].vaddr)
  123. return NULL;
  124. switch (type) {
  125. case RES_TYPE_WQ:
  126. case RES_TYPE_RQ:
  127. case RES_TYPE_CQ:
  128. case RES_TYPE_INTR_CTRL:
  129. return (char __iomem *)vdev->res[type].vaddr +
  130. index * VNIC_RES_STRIDE;
  131. default:
  132. return (char __iomem *)vdev->res[type].vaddr;
  133. }
  134. }
  135. EXPORT_SYMBOL(vnic_dev_get_res);
  136. static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
  137. unsigned int desc_count, unsigned int desc_size)
  138. {
  139. /* The base address of the desc rings must be 512 byte aligned.
  140. * Descriptor count is aligned to groups of 32 descriptors. A
  141. * count of 0 means the maximum 4096 descriptors. Descriptor
  142. * size is aligned to 16 bytes.
  143. */
  144. unsigned int count_align = 32;
  145. unsigned int desc_align = 16;
  146. ring->base_align = 512;
  147. if (desc_count == 0)
  148. desc_count = 4096;
  149. ring->desc_count = ALIGN(desc_count, count_align);
  150. ring->desc_size = ALIGN(desc_size, desc_align);
  151. ring->size = ring->desc_count * ring->desc_size;
  152. ring->size_unaligned = ring->size + ring->base_align;
  153. return ring->size_unaligned;
  154. }
/* Zero the (aligned) descriptor area of the ring. */
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
	memset(ring->descs, 0, ring->size);
}
/* Allocate a DMA-coherent descriptor ring and align its base.
 *
 * Over-allocates by base_align bytes, then derives the aligned
 * base_addr/descs pair from the unaligned allocation.  The ring is
 * zeroed and desc_avail is initialized to count - 1 (one slot is kept
 * empty to distinguish full from empty).  Returns 0 or -ENOMEM.
 */
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n",
			 (int)ring->size);
		return -ENOMEM;
	}

	/* round the bus address up to the hardware-required alignment and
	 * offset the CPU pointer by the same amount
	 */
	ring->base_addr = ALIGN(ring->base_addr_unaligned,
		ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
/* Free a ring allocated by vnic_dev_alloc_desc_ring(); safe to call on a
 * ring that was never allocated (descs == NULL).
 */
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		pci_free_consistent(vdev->pdev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}
/* Issue a command over the devcmd1 (register based) interface.
 *
 * Arguments are taken from and returned in vdev->args[].  @wait is the
 * poll budget in 100 usec steps.  Returns 0 on success, -ENODEV if the
 * device reads back all-ones (surprise removal), -EBUSY if a command is
 * already in flight, -ETIMEDOUT, or the negated firmware error code.
 */
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
	unsigned int i;
	int delay;
	u32 status;
	int err;

	status = ioread32(&devcmd->status);
	if (status == 0xFFFFFFFF) {
		/* PCI-e target device is gone */
		return -ENODEV;
	}
	if (status & STAT_BUSY) {
		vdev_neterr(vdev, "Busy devcmd %d\n", _CMD_N(cmd));
		return -EBUSY;
	}

	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			writeq(vdev->args[i], &devcmd->args[i]);
		/* args must reach the device before the cmd doorbell below */
		wmb();
	}

	iowrite32(cmd, &devcmd->cmd);

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		return 0;

	for (delay = 0; delay < wait; delay++) {

		udelay(100);

		status = ioread32(&devcmd->status);
		if (status == 0xFFFFFFFF) {
			/* PCI-e target device is gone */
			return -ENODEV;
		}

		if (!(status & STAT_BUSY)) {

			if (status & STAT_ERROR) {
				err = (int)readq(&devcmd->args[0]);
				/* CMD_CAPABILITY probes are expected to
				 * fail on older firmware; don't log those
				 */
				if (err == ERR_EINVAL &&
				    cmd == CMD_CAPABILITY)
					return -err;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr(vdev, "Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}

			if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				/* order the status read before arg reads */
				rmb();
				for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
					vdev->args[i] = readq(&devcmd->args[i]);
			}

			return 0;
		}
	}

	vdev_neterr(vdev, "Timedout devcmd %d\n", _CMD_N(cmd));
	return -ETIMEDOUT;
}
/* Issue a command over the devcmd2 (work queue based) interface.
 *
 * Posts a descriptor on the command WQ, then polls the result ring for a
 * completion whose color bit matches ours.  @wait is the poll budget in
 * 100 usec steps.  Returns 0, -ENODEV on surprise removal, -EBUSY if the
 * WQ is full, -ETIMEDOUT, or the negated firmware error code.
 */
static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
	int wait)
{
	struct devcmd2_controller *dc2c = vdev->devcmd2;
	struct devcmd2_result *result;
	u8 color;
	unsigned int i;
	int delay, err;
	u32 fetch_index, new_posted;
	u32 posted = dc2c->posted;

	fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF)
		return -ENODEV;

	new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
	/* ring full when advancing posted would catch up with fetch */
	if (new_posted == fetch_index) {
		vdev_neterr(vdev, "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
			    _CMD_N(cmd), fetch_index, posted);
		return -EBUSY;
	}

	dc2c->cmd_ring[posted].cmd = cmd;
	dc2c->cmd_ring[posted].flags = 0;

	if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
		dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
	if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
		for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
			dc2c->cmd_ring[posted].args[i] = vdev->args[i];

	/* Adding write memory barrier prevents compiler and/or CPU reordering,
	 * thus avoiding descriptor posting before descriptor is initialized.
	 * Otherwise, hardware can read stale descriptor fields.
	 */
	wmb();
	iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
	dc2c->posted = new_posted;

	if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
		return 0;

	/* advance our result cursor now; the color bit flips each time the
	 * cursor wraps, which is how a fresh completion is recognized
	 */
	result = dc2c->result + dc2c->next_result;
	color = dc2c->color;

	dc2c->next_result++;
	if (dc2c->next_result == dc2c->result_size) {
		dc2c->next_result = 0;
		dc2c->color = dc2c->color ? 0 : 1;
	}

	for (delay = 0; delay < wait; delay++) {
		if (result->color == color) {
			if (result->error) {
				err = result->error;
				if (err != ERR_ECMDUNKNOWN ||
				    cmd != CMD_CAPABILITY)
					vdev_neterr(vdev, "Error %d devcmd %d\n",
						    err, _CMD_N(cmd));
				return -err;
			}
			if (_CMD_DIR(cmd) & _CMD_DIR_READ)
				for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
					vdev->args[i] = result->results[i];

			return 0;
		}
		udelay(100);
	}

	vdev_neterr(vdev, "devcmd %d timed out\n", _CMD_N(cmd));

	return -ETIMEDOUT;
}
/* Select the legacy devcmd1 interface: locate the DEVCMD register block
 * and install _vnic_dev_cmd as the command routine.
 */
static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
	vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
	if (!vdev->devcmd)
		return -ENODEV;
	vdev->devcmd_rtn = _vnic_dev_cmd;

	return 0;
}
  314. static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
  315. {
  316. int err;
  317. unsigned int fetch_index;
  318. if (vdev->devcmd2)
  319. return 0;
  320. vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_KERNEL);
  321. if (!vdev->devcmd2)
  322. return -ENOMEM;
  323. vdev->devcmd2->color = 1;
  324. vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
  325. err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
  326. DEVCMD2_DESC_SIZE);
  327. if (err)
  328. goto err_free_devcmd2;
  329. fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
  330. if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
  331. vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n");
  332. return -ENODEV;
  333. }
  334. enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
  335. 0);
  336. vdev->devcmd2->posted = fetch_index;
  337. vnic_wq_enable(&vdev->devcmd2->wq);
  338. err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
  339. DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
  340. if (err)
  341. goto err_free_wq;
  342. vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
  343. vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
  344. vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
  345. vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
  346. VNIC_PADDR_TARGET;
  347. vdev->args[1] = DEVCMD2_RING_SIZE;
  348. err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
  349. if (err)
  350. goto err_free_desc_ring;
  351. vdev->devcmd_rtn = _vnic_dev_cmd2;
  352. return 0;
  353. err_free_desc_ring:
  354. vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
  355. err_free_wq:
  356. vnic_wq_disable(&vdev->devcmd2->wq);
  357. vnic_wq_free(&vdev->devcmd2->wq);
  358. err_free_devcmd2:
  359. kfree(vdev->devcmd2);
  360. vdev->devcmd2 = NULL;
  361. return err;
  362. }
/* Tear down the devcmd2 interface: free the result ring, stop and free
 * the posting WQ, and free the controller struct.  Caller must ensure
 * vdev->devcmd2 is non-NULL.
 */
static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
	vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
	vnic_wq_disable(&vdev->devcmd2->wq);
	vnic_wq_free(&vdev->devcmd2->wq);
	kfree(vdev->devcmd2);
}
/* Issue @cmd on behalf of another vNIC through a proxy devcmd.
 *
 * The real command and its args are packed into the proxy command's arg
 * block (args[0] = target index, args[1] = cmd, args[2..3] = a0/a1); the
 * proxied status and results come back shifted by one arg slot.  Returns
 * 0 on success, a devcmd transport error, or the proxied firmware error.
 */
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd proxy_cmd, enum vnic_devcmd_cmd cmd,
	u64 *a0, u64 *a1, int wait)
{
	u32 status;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	vdev->args[0] = vdev->proxy_index;
	vdev->args[1] = cmd;
	vdev->args[2] = *a0;
	vdev->args[3] = *a1;

	err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
	if (err)
		return err;

	status = (u32)vdev->args[0];
	if (status & STAT_ERROR) {
		err = (int)vdev->args[1];
		/* capability probes are expected to fail; don't log them */
		if (err != ERR_ECMDUNKNOWN ||
		    cmd != CMD_CAPABILITY)
			vdev_neterr(vdev, "Error %d proxy devcmd %d\n",
				    err, _CMD_N(cmd));
		return err;
	}

	*a0 = vdev->args[1];
	*a1 = vdev->args[2];

	return 0;
}
/* Issue @cmd directly (no proxying): marshal a0/a1 through vdev->args[]
 * and copy the (possibly updated) values back to the caller.
 */
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
	enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
	int err;

	vdev->args[0] = *a0;
	vdev->args[1] = *a1;

	err = vdev->devcmd_rtn(vdev, cmd, wait);

	*a0 = vdev->args[0];
	*a1 = vdev->args[1];

	return err;
}
/* Route subsequent vnic_dev_cmd() calls through CMD_PROXY_BY_INDEX for
 * the vNIC at @index, until vnic_dev_cmd_proxy_end() is called.
 */
void vnic_dev_cmd_proxy_by_index_start(struct vnic_dev *vdev, u16 index)
{
	vdev->proxy = PROXY_BY_INDEX;
	vdev->proxy_index = index;
}
/* Stop proxying; subsequent commands are issued directly again. */
void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev)
{
	vdev->proxy = PROXY_NONE;
	vdev->proxy_index = 0;
}
  418. int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
  419. u64 *a0, u64 *a1, int wait)
  420. {
  421. memset(vdev->args, 0, sizeof(vdev->args));
  422. switch (vdev->proxy) {
  423. case PROXY_BY_INDEX:
  424. return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_INDEX, cmd,
  425. a0, a1, wait);
  426. case PROXY_BY_BDF:
  427. return vnic_dev_cmd_proxy(vdev, CMD_PROXY_BY_BDF, cmd,
  428. a0, a1, wait);
  429. case PROXY_NONE:
  430. default:
  431. return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
  432. }
  433. }
  434. static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
  435. {
  436. u64 a0 = (u32)cmd, a1 = 0;
  437. int wait = 1000;
  438. int err;
  439. err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
  440. return !(err || a0);
  441. }
/* Fetch firmware version info into a cached DMA buffer.
 *
 * The buffer is allocated and filled once; later calls return the cached
 * pointer.  Falls back to CMD_MCPU_FW_INFO_OLD on older firmware.  The
 * buffer is freed in vnic_dev_unregister().  Returns 0 or a negative
 * errno; *fw_info points at the cached buffer either way once allocated.
 */
int vnic_dev_fw_info(struct vnic_dev *vdev,
	struct vnic_devcmd_fw_info **fw_info)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err = 0;

	if (!vdev->fw_info) {
		vdev->fw_info = pci_zalloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_fw_info),
			&vdev->fw_info_pa);
		if (!vdev->fw_info)
			return -ENOMEM;

		a0 = vdev->fw_info_pa;
		a1 = sizeof(struct vnic_devcmd_fw_info);

		/* only get fw_info once and cache it */
		if (vnic_dev_capable(vdev, CMD_MCPU_FW_INFO))
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO,
				&a0, &a1, wait);
		else
			err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO_OLD,
				&a0, &a1, wait);
	}

	*fw_info = vdev->fw_info;

	return err;
}
/* Read @size bytes (1/2/4/8 only) from device spec space at @offset into
 * *value.  Any other @size is a caller bug and triggers BUG().
 */
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
	void *value)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = offset;
	a1 = size;

	err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);

	switch (size) {
	case 1: *(u8 *)value = (u8)a0; break;
	case 2: *(u16 *)value = (u16)a0; break;
	case 4: *(u32 *)value = (u32)a0; break;
	case 8: *(u64 *)value = a0; break;
	default: BUG(); break;
	}

	return err;
}
/* Have firmware DMA the current device statistics into a lazily-allocated
 * coherent buffer; *stats points at that buffer (freed at unregister).
 */
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
	u64 a0, a1;
	int wait = 1000;

	if (!vdev->stats) {
		vdev->stats = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_stats), &vdev->stats_pa);
		if (!vdev->stats)
			return -ENOMEM;
	}

	*stats = vdev->stats;
	a0 = vdev->stats_pa;
	a1 = sizeof(struct vnic_stats);

	return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
/* Issue CMD_CLOSE to quiesce the device. */
int vnic_dev_close(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
/* Enable the device, preferring the synchronous CMD_ENABLE_WAIT variant
 * when firmware supports it, otherwise the legacy CMD_ENABLE.
 */
int vnic_dev_enable_wait(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_ENABLE_WAIT))
		return vnic_dev_cmd(vdev, CMD_ENABLE_WAIT, &a0, &a1, wait);
	else
		return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
/* Issue CMD_DISABLE to stop the device. */
int vnic_dev_disable(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}
/* Start an asynchronous device open; poll with vnic_dev_open_done(). */
int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}
/* Query whether a prior vnic_dev_open() has completed; *done is set to 1
 * when the open status reads back zero.
 */
int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
/* Start an asynchronous soft reset; poll with vnic_dev_soft_reset_done(). */
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
/* Query soft-reset completion; *done is set to 1 when the reset status
 * reads back zero.
 */
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
	if (err)
		return err;

	*done = (a0 == 0);

	return 0;
}
/* Recover a hung device.  Uses CMD_HANG_RESET when firmware supports it;
 * otherwise emulates it with a soft reset followed by re-init.
 */
int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int err;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET)) {
		return vnic_dev_cmd(vdev, CMD_HANG_RESET,
				&a0, &a1, wait);
	} else {
		err = vnic_dev_soft_reset(vdev, arg);
		if (err)
			return err;
		return vnic_dev_init(vdev, 0);
	}
}
/* Query hang-reset completion, falling back to the soft-reset status on
 * firmware without CMD_HANG_RESET_STATUS.  *done is set to 1 when done.
 */
int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;

	*done = 0;

	if (vnic_dev_capable(vdev, CMD_HANG_RESET_STATUS)) {
		err = vnic_dev_cmd(vdev, CMD_HANG_RESET_STATUS,
				&a0, &a1, wait);
		if (err)
			return err;
	} else {
		return vnic_dev_soft_reset_done(vdev, done);
	}

	*done = (a0 == 0);

	return 0;
}
  589. int vnic_dev_hang_notify(struct vnic_dev *vdev)
  590. {
  591. u64 a0, a1;
  592. int wait = 1000;
  593. return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
  594. }
  595. int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
  596. {
  597. u64 a0, a1;
  598. int wait = 1000;
  599. int err, i;
  600. for (i = 0; i < ETH_ALEN; i++)
  601. mac_addr[i] = 0;
  602. err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
  603. if (err)
  604. return err;
  605. for (i = 0; i < ETH_ALEN; i++)
  606. mac_addr[i] = ((u8 *)&a0)[i];
  607. return 0;
  608. }
/* Program the receive packet filter; each nonzero flag argument enables
 * the corresponding CMD_PFILTER_* class.
 */
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
	int broadcast, int promisc, int allmulti)
{
	u64 a0, a1 = 0;
	int wait = 1000;
	int err;

	a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
	     (multicast ? CMD_PFILTER_MULTICAST : 0) |
	     (broadcast ? CMD_PFILTER_BROADCAST : 0) |
	     (promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
	     (allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);

	err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't set packet filter\n");

	return err;
}
/* Add a unicast/multicast filter address (ETH_ALEN bytes, packed into
 * the low bytes of a0).
 */
int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't add addr [%pM], %d\n", addr, err);

	return err;
}
/* Remove a filter address previously added with vnic_dev_add_addr(). */
int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;
	int err;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		((u8 *)&a0)[i] = addr[i];

	err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
	if (err)
		vdev_neterr(vdev, "Can't del addr [%pM], %d\n", addr, err);

	return err;
}
/* Set the ingress VLAN rewrite mode; silently succeeds (returns 0) on
 * firmware that does not support the command.
 */
int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
	u8 ig_vlan_rewrite_mode)
{
	u64 a0 = ig_vlan_rewrite_mode, a1 = 0;
	int wait = 1000;

	if (vnic_dev_capable(vdev, CMD_IG_VLAN_REWRITE_MODE))
		return vnic_dev_cmd(vdev, CMD_IG_VLAN_REWRITE_MODE,
				&a0, &a1, wait);
	else
		return 0;
}
/* Register @notify_addr/@notify_pa as the notify block firmware writes
 * into, raising @intr on updates.  a1 packs the interrupt number in bits
 * 32..47 and the block size in the low bits; on success firmware echoes
 * the size it will actually write, which is cached in notify_sz.
 */
static int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
	void *notify_addr, dma_addr_t notify_pa, u16 intr)
{
	u64 a0, a1;
	int wait = 1000;
	int r;

	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
	vdev->notify = notify_addr;
	vdev->notify_pa = notify_pa;

	a0 = (u64)notify_pa;
	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
	a1 += sizeof(struct vnic_devcmd_notify);

	r = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify_sz = (r == 0) ? (u32)a1 : 0;

	return r;
}
/* Allocate a DMA-coherent notify block and register it with firmware.
 * Refuses (-EINVAL) if a block is already registered.  The block is
 * freed in vnic_dev_notify_unset()/vnic_dev_unregister(); setcmd stores
 * the pointer in vdev->notify even if the devcmd itself fails, so the
 * memory is not leaked on failure.
 */
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
	void *notify_addr;
	dma_addr_t notify_pa;

	if (vdev->notify || vdev->notify_pa) {
		vdev_neterr(vdev, "notify block %p still allocated\n",
			    vdev->notify);
		return -EINVAL;
	}

	notify_addr = pci_alloc_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			&notify_pa);
	if (!notify_addr)
		return -ENOMEM;

	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
}
/* Tell firmware to stop writing the notify block (paddr 0, intr -1) and
 * clear the driver-side bookkeeping.
 */
static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
{
	u64 a0, a1;
	int wait = 1000;
	int err;

	a0 = 0;  /* paddr = 0 to unset notify buffer */
	a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
	a1 += sizeof(struct vnic_devcmd_notify);

	err = vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
	vdev->notify = NULL;
	vdev->notify_pa = 0;
	vdev->notify_sz = 0;

	return err;
}
/* Free the notify block and unregister it with firmware.
 *
 * NOTE(review): the DMA buffer is freed before the unset devcmd is sent,
 * so there is a window where firmware could still DMA into freed memory
 * — presumably callers quiesce the device first; confirm against callers.
 */
int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
	if (vdev->notify) {
		pci_free_consistent(vdev->pdev,
			sizeof(struct vnic_devcmd_notify),
			vdev->notify,
			vdev->notify_pa);
	}

	return vnic_dev_notify_unsetcmd(vdev);
}
/* Snapshot the firmware-written notify block into notify_copy.
 *
 * Firmware keeps words[0] equal to the sum of the remaining words; the
 * copy is retried until that checksum matches, guaranteeing a consistent
 * snapshot despite concurrent device writes.  Returns 1 when a snapshot
 * was taken, 0 if no notify block is registered.
 */
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
	u32 *words;
	unsigned int nwords = vdev->notify_sz / 4;
	unsigned int i;
	u32 csum;

	if (!vdev->notify || !vdev->notify_sz)
		return 0;

	do {
		csum = 0;
		memcpy(&vdev->notify_copy, vdev->notify, vdev->notify_sz);
		words = (u32 *)&vdev->notify_copy;
		for (i = 1; i < nwords; i++)
			csum += words[i];
	} while (csum != words[0]);

	return 1;
}
/* Initialize the device.  Uses CMD_INIT when available; otherwise falls
 * back to CMD_INIT_v1 and, if the default-MAC flag comes back set,
 * emulates the default-MAC programming by reading the MAC and adding it
 * as a filter address (old firmware did not take flags in a0).
 */
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
	u64 a0 = (u32)arg, a1 = 0;
	int wait = 1000;
	int r = 0;

	if (vnic_dev_capable(vdev, CMD_INIT))
		r = vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
	else {
		vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
		if (a0 & CMD_INITF_DEFAULT_MAC) {
			/* Emulate these for old CMD_INIT_v1 which
			 * didn't pass a0 so no CMD_INITF_*.
			 */
			vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
			vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
		}
	}
	return r;
}
/* Issue CMD_DEINIT to undo vnic_dev_init(). */
int vnic_dev_deinit(struct vnic_dev *vdev)
{
	u64 a0 = 0, a1 = 0;
	int wait = 1000;

	return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
}
/* Install the default interrupt-coalesce timer conversion factors. */
void vnic_dev_intr_coal_timer_info_default(struct vnic_dev *vdev)
{
	/* Default: hardware intr coal timer is in units of 1.5 usecs */
	vdev->intr_coal_timer_info.mul = 2;
	vdev->intr_coal_timer_info.div = 3;
	vdev->intr_coal_timer_info.max_usec =
		vnic_dev_intr_coal_timer_hw_to_usec(vdev, 0xffff);
}
/* Query firmware for the interrupt-coalesce timer conversion factors
 * (mul, div, max_usec in args[0..2]); falls back to the built-in defaults
 * when the devcmd is unknown or returns zeros.
 */
int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev)
{
	int wait = 1000;
	int err;

	memset(vdev->args, 0, sizeof(vdev->args));

	if (vnic_dev_capable(vdev, CMD_INTR_COAL_CONVERT))
		err = vdev->devcmd_rtn(vdev, CMD_INTR_COAL_CONVERT, wait);
	else
		err = ERR_ECMDUNKNOWN;

	/* Use defaults when firmware doesn't support the devcmd at all or
	 * supports it for only specific hardware
	 */
	if ((err == ERR_ECMDUNKNOWN) ||
	    (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) {
		vdev_netwarn(vdev, "Using default conversion factor for interrupt coalesce timer\n");
		vnic_dev_intr_coal_timer_info_default(vdev);
		return 0;
	}

	if (!err) {
		vdev->intr_coal_timer_info.mul = (u32) vdev->args[0];
		vdev->intr_coal_timer_info.div = (u32) vdev->args[1];
		vdev->intr_coal_timer_info.max_usec = (u32) vdev->args[2];
	}

	return err;
}
/* Return the link state from the latest notify snapshot (0 if none). */
int vnic_dev_link_status(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.link_state;
}
/* Return the port speed from the latest notify snapshot (0 if none). */
u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.port_speed;
}
/* Return the message level from the latest notify snapshot (0 if none). */
u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.msglvl;
}
/* Return the MTU from the latest notify snapshot (0 if none). */
u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
	if (!vnic_dev_notify_ready(vdev))
		return 0;

	return vdev->notify_copy.mtu;
}
/* Record the interrupt mode (INTx/MSI/MSI-X) chosen for this device. */
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
	enum vnic_dev_intr_mode intr_mode)
{
	vdev->intr_mode = intr_mode;
}
/* Return the interrupt mode previously recorded for this device. */
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
	struct vnic_dev *vdev)
{
	return vdev->intr_mode;
}
/* Convert an interrupt-coalesce time from usecs to hardware units using
 * the mul/div factors obtained from firmware (or the defaults).
 */
u32 vnic_dev_intr_coal_timer_usec_to_hw(struct vnic_dev *vdev, u32 usec)
{
	return (usec * vdev->intr_coal_timer_info.mul) /
		vdev->intr_coal_timer_info.div;
}
/* Convert an interrupt-coalesce time from hardware units back to usecs. */
u32 vnic_dev_intr_coal_timer_hw_to_usec(struct vnic_dev *vdev, u32 hw_cycles)
{
	return (hw_cycles * vdev->intr_coal_timer_info.div) /
		vdev->intr_coal_timer_info.mul;
}
/* Return the maximum interrupt coalescing interval (in usec) supported
 * by this device, as cached in vdev->intr_coal_timer_info.
 */
u32 vnic_dev_get_intr_coal_timer_max(struct vnic_dev *vdev)
{
	return vdev->intr_coal_timer_info.max_usec;
}
  841. void vnic_dev_unregister(struct vnic_dev *vdev)
  842. {
  843. if (vdev) {
  844. if (vdev->notify)
  845. pci_free_consistent(vdev->pdev,
  846. sizeof(struct vnic_devcmd_notify),
  847. vdev->notify,
  848. vdev->notify_pa);
  849. if (vdev->stats)
  850. pci_free_consistent(vdev->pdev,
  851. sizeof(struct vnic_stats),
  852. vdev->stats, vdev->stats_pa);
  853. if (vdev->fw_info)
  854. pci_free_consistent(vdev->pdev,
  855. sizeof(struct vnic_devcmd_fw_info),
  856. vdev->fw_info, vdev->fw_info_pa);
  857. if (vdev->devcmd2)
  858. vnic_dev_deinit_devcmd2(vdev);
  859. kfree(vdev);
  860. }
  861. }
  862. EXPORT_SYMBOL(vnic_dev_unregister);
  863. struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
  864. void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
  865. unsigned int num_bars)
  866. {
  867. if (!vdev) {
  868. vdev = kzalloc(sizeof(struct vnic_dev), GFP_ATOMIC);
  869. if (!vdev)
  870. return NULL;
  871. }
  872. vdev->priv = priv;
  873. vdev->pdev = pdev;
  874. if (vnic_dev_discover_res(vdev, bar, num_bars))
  875. goto err_out;
  876. return vdev;
  877. err_out:
  878. vnic_dev_unregister(vdev);
  879. return NULL;
  880. }
  881. EXPORT_SYMBOL(vnic_dev_register);
/* Return the PCI device backing this vnic_dev. */
struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
{
	return vdev->pdev;
}
EXPORT_SYMBOL(vnic_dev_get_pdev);
  887. int vnic_devcmd_init(struct vnic_dev *vdev)
  888. {
  889. void __iomem *res;
  890. int err;
  891. res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
  892. if (res) {
  893. err = vnic_dev_init_devcmd2(vdev);
  894. if (err)
  895. vdev_warn(vdev, "DEVCMD2 init failed: %d, Using DEVCMD1\n",
  896. err);
  897. else
  898. return 0;
  899. } else {
  900. vdev_warn(vdev, "DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n");
  901. }
  902. err = vnic_dev_init_devcmd1(vdev);
  903. if (err)
  904. vdev_err(vdev, "DEVCMD1 initialization failed: %d\n", err);
  905. return err;
  906. }
  907. int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
  908. {
  909. u64 a0, a1 = len;
  910. int wait = 1000;
  911. dma_addr_t prov_pa;
  912. void *prov_buf;
  913. int ret;
  914. prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
  915. if (!prov_buf)
  916. return -ENOMEM;
  917. memcpy(prov_buf, buf, len);
  918. a0 = prov_pa;
  919. ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);
  920. pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
  921. return ret;
  922. }
  923. int vnic_dev_enable2(struct vnic_dev *vdev, int active)
  924. {
  925. u64 a0, a1 = 0;
  926. int wait = 1000;
  927. a0 = (active ? CMD_ENABLE2_ACTIVE : 0);
  928. return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
  929. }
  930. static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
  931. int *status)
  932. {
  933. u64 a0 = cmd, a1 = 0;
  934. int wait = 1000;
  935. int ret;
  936. ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
  937. if (!ret)
  938. *status = (int)a0;
  939. return ret;
  940. }
/* Poll completion status of an earlier CMD_ENABLE2; see
 * vnic_dev_cmd_status() for *status semantics.
 */
int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
}
/* Poll completion status of an earlier CMD_DEINIT; see
 * vnic_dev_cmd_status() for *status semantics.
 */
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
{
	return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
}
  949. int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
  950. {
  951. u64 a0, a1;
  952. int wait = 1000;
  953. int i;
  954. for (i = 0; i < ETH_ALEN; i++)
  955. ((u8 *)&a0)[i] = mac_addr[i];
  956. return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
  957. }
  958. /* vnic_dev_classifier: Add/Delete classifier entries
  959. * @vdev: vdev of the device
  960. * @cmd: CLSF_ADD for Add filter
  961. * CLSF_DEL for Delete filter
  962. * @entry: In case of ADD filter, the caller passes the RQ number in this
  963. * variable.
  964. *
  965. * This function stores the filter_id returned by the firmware in the
  966. * same variable before return;
  967. *
  968. * In case of DEL filter, the caller passes the RQ number. Return
  969. * value is irrelevant.
  970. * @data: filter data
  971. */
  972. int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
  973. struct filter *data)
  974. {
  975. u64 a0, a1;
  976. int wait = 1000;
  977. dma_addr_t tlv_pa;
  978. int ret = -EINVAL;
  979. struct filter_tlv *tlv, *tlv_va;
  980. struct filter_action *action;
  981. u64 tlv_size;
  982. if (cmd == CLSF_ADD) {
  983. tlv_size = sizeof(struct filter) +
  984. sizeof(struct filter_action) +
  985. 2 * sizeof(struct filter_tlv);
  986. tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa);
  987. if (!tlv_va)
  988. return -ENOMEM;
  989. tlv = tlv_va;
  990. a0 = tlv_pa;
  991. a1 = tlv_size;
  992. memset(tlv, 0, tlv_size);
  993. tlv->type = CLSF_TLV_FILTER;
  994. tlv->length = sizeof(struct filter);
  995. *(struct filter *)&tlv->val = *data;
  996. tlv = (struct filter_tlv *)((char *)tlv +
  997. sizeof(struct filter_tlv) +
  998. sizeof(struct filter));
  999. tlv->type = CLSF_TLV_ACTION;
  1000. tlv->length = sizeof(struct filter_action);
  1001. action = (struct filter_action *)&tlv->val;
  1002. action->type = FILTER_ACTION_RQ_STEERING;
  1003. action->u.rq_idx = *entry;
  1004. ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
  1005. *entry = (u16)a0;
  1006. pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
  1007. } else if (cmd == CLSF_DEL) {
  1008. a0 = *entry;
  1009. ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
  1010. }
  1011. return ret;
  1012. }