port.c 59 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131
  1. /*
  2. * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/errno.h>
  33. #include <linux/if_ether.h>
  34. #include <linux/if_vlan.h>
  35. #include <linux/export.h>
  36. #include <linux/mlx4/cmd.h>
  37. #include "mlx4.h"
  38. #include "mlx4_stats.h"
  39. #define MLX4_MAC_VALID (1ull << 63)
  40. #define MLX4_VLAN_VALID (1u << 31)
  41. #define MLX4_VLAN_MASK 0xfff
  42. #define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
  43. #define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
  44. #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
  45. #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
  46. #define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1)
  47. #define MLX4_FLAG2_V_USER_MTU_MASK BIT(5)
  48. #define MLX4_FLAG2_V_USER_MAC_MASK BIT(6)
  49. #define MLX4_FLAG_V_MTU_MASK BIT(0)
  50. #define MLX4_FLAG_V_PPRX_MASK BIT(1)
  51. #define MLX4_FLAG_V_PPTX_MASK BIT(2)
  52. #define MLX4_IGNORE_FCS_MASK 0x1
  53. #define MLX4_TC_MAX_NUMBER 8
  54. void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
  55. {
  56. int i;
  57. mutex_init(&table->mutex);
  58. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  59. table->entries[i] = 0;
  60. table->refs[i] = 0;
  61. table->is_dup[i] = false;
  62. }
  63. table->max = 1 << dev->caps.log_num_macs;
  64. table->total = 0;
  65. }
  66. void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
  67. {
  68. int i;
  69. mutex_init(&table->mutex);
  70. for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
  71. table->entries[i] = 0;
  72. table->refs[i] = 0;
  73. table->is_dup[i] = false;
  74. }
  75. table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
  76. table->total = 0;
  77. }
  78. void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
  79. struct mlx4_roce_gid_table *table)
  80. {
  81. int i;
  82. mutex_init(&table->mutex);
  83. for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
  84. memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
  85. }
  86. static int validate_index(struct mlx4_dev *dev,
  87. struct mlx4_mac_table *table, int index)
  88. {
  89. int err = 0;
  90. if (index < 0 || index >= table->max || !table->entries[index]) {
  91. mlx4_warn(dev, "No valid Mac entry for the given index\n");
  92. err = -EINVAL;
  93. }
  94. return err;
  95. }
  96. static int find_index(struct mlx4_dev *dev,
  97. struct mlx4_mac_table *table, u64 mac)
  98. {
  99. int i;
  100. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  101. if (table->refs[i] &&
  102. (MLX4_MAC_MASK & mac) ==
  103. (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
  104. return i;
  105. }
  106. /* Mac not found */
  107. return -EINVAL;
  108. }
  109. static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
  110. __be64 *entries)
  111. {
  112. struct mlx4_cmd_mailbox *mailbox;
  113. u32 in_mod;
  114. int err;
  115. mailbox = mlx4_alloc_cmd_mailbox(dev);
  116. if (IS_ERR(mailbox))
  117. return PTR_ERR(mailbox);
  118. memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
  119. in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
  120. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  121. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  122. MLX4_CMD_NATIVE);
  123. mlx4_free_cmd_mailbox(dev, mailbox);
  124. return err;
  125. }
  126. int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
  127. {
  128. struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
  129. struct mlx4_mac_table *table = &info->mac_table;
  130. int i;
  131. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  132. if (!table->refs[i])
  133. continue;
  134. if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
  135. *idx = i;
  136. return 0;
  137. }
  138. }
  139. return -ENOENT;
  140. }
  141. EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
  142. static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
  143. {
  144. int i, num_eth_ports = 0;
  145. if (!mlx4_is_mfunc(dev))
  146. return false;
  147. mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
  148. ++num_eth_ports;
  149. return (num_eth_ports == 2) ? true : false;
  150. }
/*
 * Add @mac to @port's MAC table, or take one more reference when it is
 * already present.  Returns the table index on success (>= 0) or a
 * negative errno.
 *
 * On a multi-function device with two ETH ports the tables of both
 * ports are kept in sync so that port bonding (VF HA) can work: the
 * entry is mirrored into the other port's table with is_dup set and
 * refcount 0 there.  Both port mutexes are taken, always in port order
 * 1 then 2, to avoid ABBA deadlocks.
 */
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;           /* first free slot in this port's table */
	int free_for_dup = -1;   /* slot free (or reusable) on BOTH ports */
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
	bool need_mf_bond = mlx4_need_mf_bond(dev);
	bool can_mf_bond = true;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n",
		 (unsigned long long)mac, port,
		 dup ? "with" : "without");

	/* Lock both tables in fixed order (port 1 first) when both ports
	 * may be modified, to avoid ABBA deadlocks.
	 */
	if (need_mf_bond) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	if (need_mf_bond) {
		int index_at_port = -1;
		int index_at_dup_port = -1;

		/* Locate @mac (if present) in both ports' tables. */
		for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
				index_at_port = i;
			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]))))
				index_at_dup_port = i;
		}

		/* check that same mac is not in the tables at different indices */
		if ((index_at_port != index_at_dup_port) &&
		    (index_at_port >= 0) &&
		    (index_at_dup_port >= 0))
			can_mf_bond = false;

		/* If the mac is already in the primary table, the slot must be
		 * available in the duplicate table as well.
		 */
		if (index_at_port >= 0 && index_at_dup_port < 0 &&
		    dup_table->refs[index_at_port]) {
			can_mf_bond = false;
		}
		/* If the mac is already in the duplicate table, check that the
		 * corresponding index is not occupied in the primary table, or
		 * the primary table already contains the mac at the same index.
		 * Otherwise, you cannot bond (primary contains a different mac
		 * at that index).
		 */
		if (index_at_dup_port >= 0) {
			if (!table->refs[index_at_dup_port] ||
			    ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
				free_for_dup = index_at_dup_port;
			else
				can_mf_bond = false;
		}
	}

	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!table->refs[i]) {
			/* Remember the first free slot, and separately the
			 * first slot that is free on both ports.
			 */
			if (free < 0)
				free = i;
			if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
				if (!dup_table->refs[i])
					free_for_dup = i;
			}
			continue;
		}
		if ((MLX4_MAC_MASK & mac) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, increment ref count */
			err = i;
			++table->refs[i];
			if (dup) {
				u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]);

				/* Bonded mode: the mirrored entry is expected
				 * to exist on the other port at the same slot.
				 */
				if (dup_mac != mac || !dup_table->is_dup[i]) {
					mlx4_warn(dev, "register mac: expect duplicate mac 0x%llx on port %d index %d\n",
						  mac, dup_port, i);
				}
			}
			goto out;
		}
	}

	/* Could not find a slot usable on both ports: fall back to a
	 * single-port registration and warn that HA may not work.
	 */
	if (need_mf_bond && (free_for_dup < 0)) {
		if (dup) {
			mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n");
			mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
			dup = false;
		}
		can_mf_bond = false;
	}

	if (need_mf_bond && can_mf_bond)
		free = free_for_dup;

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		table->entries[free] = 0;
		goto out;
	}
	table->refs[free] = 1;
	table->is_dup[free] = false;
	++table->total;

	/* Mirror the entry on the bonded port: refcount stays 0 there,
	 * is_dup marks it as a shadow of this port's entry.
	 */
	if (dup) {
		dup_table->refs[free] = 0;
		dup_table->is_dup[free] = true;
		dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

		err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
		if (unlikely(err)) {
			/* Roll back only the mirror; the primary entry stays. */
			mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac);
			dup_table->is_dup[free] = false;
			dup_table->entries[free] = 0;
			goto out;
		}
		++dup_table->total;
	}
	err = free;
out:
	/* Unlock in reverse order of acquisition. */
	if (need_mf_bond) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);
  294. int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
  295. {
  296. u64 out_param = 0;
  297. int err = -EINVAL;
  298. if (mlx4_is_mfunc(dev)) {
  299. if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
  300. err = mlx4_cmd_imm(dev, mac, &out_param,
  301. ((u32) port) << 8 | (u32) RES_MAC,
  302. RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
  303. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  304. }
  305. if (err && err == -EINVAL && mlx4_is_slave(dev)) {
  306. /* retry using old REG_MAC format */
  307. set_param_l(&out_param, port);
  308. err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
  309. RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
  310. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  311. if (!err)
  312. dev->flags |= MLX4_FLAG_OLD_REG_MAC;
  313. }
  314. if (err)
  315. return err;
  316. return get_param_l(&out_param);
  317. }
  318. return __mlx4_register_mac(dev, port, mac);
  319. }
  320. EXPORT_SYMBOL_GPL(mlx4_register_mac);
  321. int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
  322. {
  323. return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
  324. (port - 1) * (1 << dev->caps.log_num_macs);
  325. }
  326. EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
  327. void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
  328. {
  329. struct mlx4_port_info *info;
  330. struct mlx4_mac_table *table;
  331. int index;
  332. bool dup = mlx4_is_mf_bonded(dev);
  333. u8 dup_port = (port == 1) ? 2 : 1;
  334. struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
  335. if (port < 1 || port > dev->caps.num_ports) {
  336. mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
  337. return;
  338. }
  339. info = &mlx4_priv(dev)->port[port];
  340. table = &info->mac_table;
  341. if (dup) {
  342. if (port == 1) {
  343. mutex_lock(&table->mutex);
  344. mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
  345. } else {
  346. mutex_lock(&dup_table->mutex);
  347. mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
  348. }
  349. } else {
  350. mutex_lock(&table->mutex);
  351. }
  352. index = find_index(dev, table, mac);
  353. if (validate_index(dev, table, index))
  354. goto out;
  355. if (--table->refs[index] || table->is_dup[index]) {
  356. mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
  357. index);
  358. if (!table->refs[index])
  359. dup_table->is_dup[index] = false;
  360. goto out;
  361. }
  362. table->entries[index] = 0;
  363. if (mlx4_set_port_mac_table(dev, port, table->entries))
  364. mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port);
  365. --table->total;
  366. if (dup) {
  367. dup_table->is_dup[index] = false;
  368. if (dup_table->refs[index])
  369. goto out;
  370. dup_table->entries[index] = 0;
  371. if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries))
  372. mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port);
  373. --table->total;
  374. }
  375. out:
  376. if (dup) {
  377. if (port == 2) {
  378. mutex_unlock(&table->mutex);
  379. mutex_unlock(&dup_table->mutex);
  380. } else {
  381. mutex_unlock(&dup_table->mutex);
  382. mutex_unlock(&table->mutex);
  383. }
  384. } else {
  385. mutex_unlock(&table->mutex);
  386. }
  387. }
  388. EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
  389. void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
  390. {
  391. u64 out_param = 0;
  392. if (mlx4_is_mfunc(dev)) {
  393. if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
  394. (void) mlx4_cmd_imm(dev, mac, &out_param,
  395. ((u32) port) << 8 | (u32) RES_MAC,
  396. RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
  397. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  398. } else {
  399. /* use old unregister mac format */
  400. set_param_l(&out_param, port);
  401. (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
  402. RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
  403. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  404. }
  405. return;
  406. }
  407. __mlx4_unregister_mac(dev, port, mac);
  408. return;
  409. }
  410. EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
/*
 * Replace the MAC backing QP @qpn on @port with @new_mac.  The table
 * slot is derived from the QP number relative to the port's base QPN
 * (one QP per MAC slot).  In bonded mode the mirrored entry on the
 * other port is updated as well.  Returns 0 or a negative errno.
 */
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;
	int err = 0;
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;

	/* CX1 doesn't support multi-functions */
	/* Lock both tables in fixed order (port 1 first) when the bonded
	 * port may also be modified, to avoid ABBA deadlocks.
	 */
	if (dup) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		/* Firmware rejected the update: invalidate the slot. */
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;
	} else {
		if (dup) {
			/* Mirror the new MAC on the bonded port. */
			dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

			err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
			if (unlikely(err)) {
				mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n",
					 (unsigned long long)new_mac);
				dup_table->entries[index] = 0;
			}
		}
	}
out:
	/* Unlock in reverse order of acquisition. */
	if (dup) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
  467. static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
  468. __be32 *entries)
  469. {
  470. struct mlx4_cmd_mailbox *mailbox;
  471. u32 in_mod;
  472. int err;
  473. mailbox = mlx4_alloc_cmd_mailbox(dev);
  474. if (IS_ERR(mailbox))
  475. return PTR_ERR(mailbox);
  476. memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
  477. in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
  478. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  479. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  480. MLX4_CMD_NATIVE);
  481. mlx4_free_cmd_mailbox(dev, mailbox);
  482. return err;
  483. }
  484. int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
  485. {
  486. struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
  487. int i;
  488. for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
  489. if (table->refs[i] &&
  490. (vid == (MLX4_VLAN_MASK &
  491. be32_to_cpu(table->entries[i])))) {
  492. /* VLAN already registered, increase reference count */
  493. *idx = i;
  494. return 0;
  495. }
  496. }
  497. return -ENOENT;
  498. }
  499. EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
/*
 * Add @vlan to @port's VLAN table (or take another reference if it is
 * already there) and return the chosen slot through *@index.  Slots
 * below MLX4_VLAN_REGULAR are reserved and never allocated here.
 *
 * Mirrors __mlx4_register_mac: on a two-ETH-port multi-function device
 * the entry is duplicated into the other port's table (is_dup set,
 * refcount 0) so port bonding can work; both mutexes are taken in port
 * order 1 then 2 to avoid ABBA deadlocks.
 */
int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
			 int *index)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int i, err = 0;
	int free = -1;           /* first free slot in this port's table */
	int free_for_dup = -1;   /* slot free (or reusable) on BOTH ports */
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
	bool need_mf_bond = mlx4_need_mf_bond(dev);
	bool can_mf_bond = true;

	mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n",
		 vlan, port,
		 dup ? "with" : "without");

	/* Fixed lock order (port 1 first) to avoid ABBA deadlocks. */
	if (need_mf_bond) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	if (table->total == table->max) {
		/* No free vlan entries */
		err = -ENOSPC;
		goto out;
	}

	if (need_mf_bond) {
		int index_at_port = -1;
		int index_at_dup_port = -1;

		/* Locate @vlan (if present) in both ports' tables,
		 * skipping the reserved special slots.
		 */
		for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
			if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i])))
				index_at_port = i;
			if (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i])))
				index_at_dup_port = i;
		}
		/* check that same vlan is not in the tables at different indices */
		if ((index_at_port != index_at_dup_port) &&
		    (index_at_port >= 0) &&
		    (index_at_dup_port >= 0))
			can_mf_bond = false;

		/* If the vlan is already in the primary table, the slot must be
		 * available in the duplicate table as well.
		 */
		if (index_at_port >= 0 && index_at_dup_port < 0 &&
		    dup_table->refs[index_at_port]) {
			can_mf_bond = false;
		}
		/* If the vlan is already in the duplicate table, check that the
		 * corresponding index is not occupied in the primary table, or
		 * the primary table already contains the vlan at the same index.
		 * Otherwise, you cannot bond (primary contains a different vlan
		 * at that index).
		 *
		 * NOTE(review): the second comparison reads
		 * dup_table->entries[index_at_dup_port], which is trivially
		 * true by definition of index_at_dup_port; the MAC twin
		 * (__mlx4_register_mac) compares against the PRIMARY table
		 * here.  Looks like it should be table->entries — confirm
		 * against the MAC path before changing.
		 */
		if (index_at_dup_port >= 0) {
			if (!table->refs[index_at_dup_port] ||
			    (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port]))))
				free_for_dup = index_at_dup_port;
			else
				can_mf_bond = false;
		}
	}

	for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
		if (!table->refs[i]) {
			/* Remember first free slot, and first slot free on
			 * both ports.  No "continue" here: an unreferenced
			 * slot may still be an is_dup shadow matched below.
			 */
			if (free < 0)
				free = i;
			if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
				if (!dup_table->refs[i])
					free_for_dup = i;
			}
		}
		if ((table->refs[i] || table->is_dup[i]) &&
		    (vlan == (MLX4_VLAN_MASK &
			      be32_to_cpu(table->entries[i])))) {
			/* Vlan already registered, increase references count */
			mlx4_dbg(dev, "vlan %u is already registered.\n", vlan);
			*index = i;
			++table->refs[i];
			if (dup) {
				u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]);

				/* The mirror entry should exist on the other
				 * port at the same slot.
				 */
				if (dup_vlan != vlan || !dup_table->is_dup[i]) {
					mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n",
						  vlan, dup_port, i);
				}
			}
			goto out;
		}
	}

	/* No slot usable on both ports: register on one port only and
	 * warn that HA may not work.
	 */
	if (need_mf_bond && (free_for_dup < 0)) {
		if (dup) {
			mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n");
			mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
			dup = false;
		}
		can_mf_bond = false;
	}

	if (need_mf_bond && can_mf_bond)
		free = free_for_dup;
	if (free < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Register new VLAN */
	table->refs[free] = 1;
	table->is_dup[free] = false;
	table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

	err = mlx4_set_port_vlan_table(dev, port, table->entries);
	if (unlikely(err)) {
		/* Roll back the software state on firmware failure. */
		mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
		table->refs[free] = 0;
		table->entries[free] = 0;
		goto out;
	}
	++table->total;

	/* Mirror the entry on the bonded port (refcount 0, is_dup set). */
	if (dup) {
		dup_table->refs[free] = 0;
		dup_table->is_dup[free] = true;
		dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);

		err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries);
		if (unlikely(err)) {
			mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan);
			dup_table->is_dup[free] = false;
			dup_table->entries[free] = 0;
			goto out;
		}
		++dup_table->total;
	}

	*index = free;
out:
	/* Unlock in reverse order of acquisition. */
	if (need_mf_bond) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
	return err;
}
  646. int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
  647. {
  648. u64 out_param = 0;
  649. int err;
  650. if (vlan > 4095)
  651. return -EINVAL;
  652. if (mlx4_is_mfunc(dev)) {
  653. err = mlx4_cmd_imm(dev, vlan, &out_param,
  654. ((u32) port) << 8 | (u32) RES_VLAN,
  655. RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
  656. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  657. if (!err)
  658. *index = get_param_l(&out_param);
  659. return err;
  660. }
  661. return __mlx4_register_vlan(dev, port, vlan, index);
  662. }
  663. EXPORT_SYMBOL_GPL(mlx4_register_vlan);
/*
 * Drop one reference on @vlan in @port's VLAN table; when the last
 * reference is gone the entry is cleared in firmware too.  Reserved
 * slots (< MLX4_VLAN_REGULAR) are never freed.  In bonded mode the
 * mirrored entry on the other port is cleared as well; both mutexes
 * are taken in port order (1 then 2) to avoid ABBA deadlocks.
 */
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int index;
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;

	if (dup) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
		goto out;
	}
	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		goto out;
	}

	/* Keep the entry while other users hold references, or while the
	 * slot is a shadow of the bonded port's entry.
	 */
	if (--table->refs[index] || table->is_dup[index]) {
		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
			 table->refs[index], index);
		if (!table->refs[index])
			dup_table->is_dup[index] = false;
		goto out;
	}
	table->entries[index] = 0;
	if (mlx4_set_port_vlan_table(dev, port, table->entries))
		mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port);
	--table->total;

	if (dup) {
		dup_table->is_dup[index] = false;
		if (dup_table->refs[index])
			goto out;
		dup_table->entries[index] = 0;
		if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries))
			mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port);
		--dup_table->total;
	}
out:
	/* Unlock in reverse order of acquisition. */
	if (dup) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
}
  723. void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
  724. {
  725. u64 out_param = 0;
  726. if (mlx4_is_mfunc(dev)) {
  727. (void) mlx4_cmd_imm(dev, vlan, &out_param,
  728. ((u32) port) << 8 | (u32) RES_VLAN,
  729. RES_OP_RESERVE_AND_MAP,
  730. MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
  731. MLX4_CMD_WRAPPED);
  732. return;
  733. }
  734. __mlx4_unregister_vlan(dev, port, vlan);
  735. }
  736. EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
/* Mirror the MAC tables of physical ports 1 and 2 when entering bonded
 * (HA) mode: an entry present on only one port is copied to the other
 * and flagged is_dup on the copy; entries already equal on both sides
 * are flagged is_dup on both.  Returns -EINVAL if the ports hold
 * different non-zero entries at the same index.
 */
int mlx4_bond_mac_table(struct mlx4_dev *dev)
{
	struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
	struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
	int ret = 0;
	int i;
	bool update1 = false;
	bool update2 = false;

	/* Fixed lock order: port 1's table before port 2's */
	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);

	/* Pass 1: reject conflicting entries before modifying anything */
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if ((t1->entries[i] != t2->entries[i]) &&
		    t1->entries[i] && t2->entries[i]) {
			mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i);
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Pass 2: copy one-sided entries across and mark duplicates */
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (t1->entries[i] && !t2->entries[i]) {
			t2->entries[i] = t1->entries[i];
			t2->is_dup[i] = true;
			update2 = true;
		} else if (!t1->entries[i] && t2->entries[i]) {
			t1->entries[i] = t2->entries[i];
			t1->is_dup[i] = true;
			update1 = true;
		} else if (t1->entries[i] && t2->entries[i]) {
			t1->is_dup[i] = true;
			t2->is_dup[i] = true;
		}
	}

	/* Push only the tables that actually changed to FW */
	if (update1) {
		ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret);
	}
	if (!ret && update2) {
		ret = mlx4_set_port_mac_table(dev, 2, t2->entries);
		if (ret)
			mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret);
	}

	if (ret)
		mlx4_warn(dev, "failed to create mirror MAC tables\n");
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}
/* Undo MAC table mirroring when leaving bonded (HA) mode: clear all
 * is_dup flags on both ports and remove entries that were kept only as
 * mirrors (local refcount zero).  Expects both tables to be identical
 * (the state bonding left them in); otherwise returns -EINVAL.
 */
int mlx4_unbond_mac_table(struct mlx4_dev *dev)
{
	struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
	struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
	int ret = 0;
	int ret1;
	int i;
	bool update1 = false;
	bool update2 = false;

	/* Fixed lock order: port 1's table before port 2's */
	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);

	/* Sanity check: bonded tables must still match entry-for-entry */
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (t1->entries[i] != t2->entries[i]) {
			mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n");
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Drop mirror flags; free entries with no local references */
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!t1->entries[i])
			continue;
		t1->is_dup[i] = false;
		if (!t1->refs[i]) {
			t1->entries[i] = 0;
			update1 = true;
		}
		t2->is_dup[i] = false;
		if (!t2->refs[i]) {
			t2->entries[i] = 0;
			update2 = true;
		}
	}

	/* Push both tables to FW; try port 2 even if port 1 failed and
	 * report the last error seen.
	 */
	if (update1) {
		ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret);
	}
	if (update2) {
		ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries);
		if (ret1) {
			mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1);
			ret = ret1;
		}
	}
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}
/* VLAN-table counterpart of mlx4_bond_mac_table(): mirror the VLAN
 * tables of ports 1 and 2 when entering bonded (HA) mode.  One-sided
 * entries are copied across and flagged is_dup; entries equal on both
 * sides get is_dup on both.  Returns -EINVAL on conflicting entries.
 */
int mlx4_bond_vlan_table(struct mlx4_dev *dev)
{
	struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
	struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
	int ret = 0;
	int i;
	bool update1 = false;
	bool update2 = false;

	/* Fixed lock order: port 1's table before port 2's */
	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);

	/* Pass 1: reject conflicting entries before modifying anything */
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if ((t1->entries[i] != t2->entries[i]) &&
		    t1->entries[i] && t2->entries[i]) {
			mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Pass 2: copy one-sided entries across and mark duplicates */
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if (t1->entries[i] && !t2->entries[i]) {
			t2->entries[i] = t1->entries[i];
			t2->is_dup[i] = true;
			update2 = true;
		} else if (!t1->entries[i] && t2->entries[i]) {
			t1->entries[i] = t2->entries[i];
			t1->is_dup[i] = true;
			update1 = true;
		} else if (t1->entries[i] && t2->entries[i]) {
			t1->is_dup[i] = true;
			t2->is_dup[i] = true;
		}
	}

	/* Push only the tables that actually changed to FW */
	if (update1) {
		ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret);
	}
	if (!ret && update2) {
		ret = mlx4_set_port_vlan_table(dev, 2, t2->entries);
		if (ret)
			mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret);
	}

	if (ret)
		mlx4_warn(dev, "failed to create mirror VLAN tables\n");
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}
/* VLAN-table counterpart of mlx4_unbond_mac_table(): drop all is_dup
 * flags and clear entries kept only as mirrors (local refcount zero)
 * when leaving bonded (HA) mode.  Expects both tables to be identical;
 * otherwise returns -EINVAL.
 */
int mlx4_unbond_vlan_table(struct mlx4_dev *dev)
{
	struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
	struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
	int ret = 0;
	int ret1;
	int i;
	bool update1 = false;
	bool update2 = false;

	/* Fixed lock order: port 1's table before port 2's */
	mutex_lock(&t1->mutex);
	mutex_lock(&t2->mutex);

	/* Sanity check: bonded tables must still match entry-for-entry */
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if (t1->entries[i] != t2->entries[i]) {
			mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n");
			ret = -EINVAL;
			goto unlock;
		}
	}

	/* Drop mirror flags; free entries with no local references */
	for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
		if (!t1->entries[i])
			continue;
		t1->is_dup[i] = false;
		if (!t1->refs[i]) {
			t1->entries[i] = 0;
			update1 = true;
		}
		t2->is_dup[i] = false;
		if (!t2->refs[i]) {
			t2->entries[i] = 0;
			update2 = true;
		}
	}

	/* Push both tables to FW; try port 2 even if port 1 failed and
	 * report the last error seen.
	 */
	if (update1) {
		ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
		if (ret)
			mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret);
	}
	if (update2) {
		ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
		if (ret1) {
			mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1);
			ret = ret1;
		}
	}
unlock:
	mutex_unlock(&t2->mutex);
	mutex_unlock(&t1->mutex);
	return ret;
}
/* Query an IB port's capability mask with a MAD_IFC (PortInfo) command.
 * On success the big-endian capability mask is stored in *caps.
 */
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	/* Hand-rolled MAD header: version/class/method bytes all set to 1
	 * (a subnet-management Get) -- NOTE(review): field meanings are per
	 * the IB MAD header layout, not visible here.
	 */
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	/* Attribute ID 0x0015 (PortInfo), attribute modifier = port */
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		/* Capability mask is read from byte offset 84 of the reply */
		*caps = *(__be32 *) (outbuf + 84);

	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}
  963. static struct mlx4_roce_gid_entry zgid_entry;
/* Return how many RoCE GID table entries @slave owns on @port.  The PF
 * (slave 0) always gets MLX4_ROCE_PF_GIDS; the remaining entries are
 * divided among the port's VFs, the first (remainder) VFs receiving one
 * extra entry each.
 */
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
{
	int vfs;
	int slave_gid = slave;
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return MLX4_ROCE_PF_GIDS;

	/* Slave is a VF */
	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Convert the global slave number into this VF's ordinal among the
	 * VFs on @port by subtracting slaves exclusive to other ports.
	 */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
			dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	/* First (remainder) VFs get the ceiling of the share, rest the floor */
	if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
}
/* Return the first RoCE GID table index owned by @slave on @port.  The
 * PF's GIDs occupy indices [0, MLX4_ROCE_PF_GIDS); VF ranges follow,
 * sized by the same split rule as mlx4_get_slave_num_gids().
 */
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
{
	int gids;
	unsigned i;
	int slave_gid = slave;
	int vfs;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return 0;

	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Convert the global slave number into this VF's ordinal among the
	 * VFs on @port by subtracting slaves exclusive to other ports.
	 */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
			dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	/* VFs before the remainder boundary have larger (ceil-sized) ranges */
	if (slave_gid <= gids % vfs)
		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
	return MLX4_ROCE_PF_GIDS + (gids % vfs) +
		((gids / vfs) * (slave_gid - 1));
}
EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
/* Zero the RoCE GIDs owned by @slave on @port and program the updated
 * GID table into FW via SET_PORT.  @mailbox is caller-provided and its
 * contents are overwritten.  Takes the port's gid_table mutex.
 */
static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
				     int port, struct mlx4_cmd_mailbox *mailbox)
{
	struct mlx4_roce_gid_entry *gid_entry_mbox;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_gids, base, offset;
	int i, err;

	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
	base = mlx4_get_base_gid_ix(dev, slave, port);

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	mutex_lock(&(priv->port[port].gid_table.mutex));
	/* Zero-out gids belonging to that slave in the port GID table */
	for (i = 0, offset = base; i < num_gids; offset++, i++)
		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);

	/* Now, copy roce port gids table to mailbox for passing to FW */
	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
		memcpy(gid_entry_mbox->raw,
		       priv->port[port].gid_table.roce_gids[i].raw,
		       MLX4_ROCE_GID_ENTRY_SIZE);

	err = mlx4_cmd(dev, mailbox->dma,
		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
		       MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	mutex_unlock(&(priv->port[port].gid_table.mutex));
	return err;
}
  1059. void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
  1060. {
  1061. struct mlx4_active_ports actv_ports;
  1062. struct mlx4_cmd_mailbox *mailbox;
  1063. int num_eth_ports, err;
  1064. int i;
  1065. if (slave < 0 || slave > dev->persist->num_vfs)
  1066. return;
  1067. actv_ports = mlx4_get_active_ports(dev, slave);
  1068. for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
  1069. if (test_bit(i, actv_ports.ports)) {
  1070. if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
  1071. continue;
  1072. num_eth_ports++;
  1073. }
  1074. }
  1075. if (!num_eth_ports)
  1076. return;
  1077. /* have ETH ports. Alloc mailbox for SET_PORT command */
  1078. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1079. if (IS_ERR(mailbox))
  1080. return;
  1081. for (i = 0; i < dev->caps.num_ports; i++) {
  1082. if (test_bit(i, actv_ports.ports)) {
  1083. if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
  1084. continue;
  1085. err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
  1086. if (err)
  1087. mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
  1088. slave, i + 1, err);
  1089. }
  1090. }
  1091. mlx4_free_cmd_mailbox(dev, mailbox);
  1092. return;
  1093. }
  1094. static void
  1095. mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port,
  1096. struct mlx4_set_port_general_context *gen_context)
  1097. {
  1098. struct mlx4_priv *priv = mlx4_priv(dev);
  1099. struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
  1100. struct mlx4_slave_state *slave_st = &master->slave_state[slave];
  1101. u16 mtu, prev_mtu;
  1102. /* Mtu is configured as the max USER_MTU among all
  1103. * the functions on the port.
  1104. */
  1105. mtu = be16_to_cpu(gen_context->mtu);
  1106. mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
  1107. ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
  1108. prev_mtu = slave_st->mtu[port];
  1109. slave_st->mtu[port] = mtu;
  1110. if (mtu > master->max_mtu[port])
  1111. master->max_mtu[port] = mtu;
  1112. if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) {
  1113. int i;
  1114. slave_st->mtu[port] = mtu;
  1115. master->max_mtu[port] = mtu;
  1116. for (i = 0; i < dev->num_slaves; i++)
  1117. master->max_mtu[port] =
  1118. max_t(u16, master->max_mtu[port],
  1119. master->slave_state[i].mtu[port]);
  1120. }
  1121. gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
  1122. }
  1123. static void
  1124. mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port,
  1125. struct mlx4_set_port_general_context *gen_context)
  1126. {
  1127. struct mlx4_priv *priv = mlx4_priv(dev);
  1128. struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
  1129. struct mlx4_slave_state *slave_st = &master->slave_state[slave];
  1130. u16 user_mtu, prev_user_mtu;
  1131. /* User Mtu is configured as the max USER_MTU among all
  1132. * the functions on the port.
  1133. */
  1134. user_mtu = be16_to_cpu(gen_context->user_mtu);
  1135. user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]);
  1136. prev_user_mtu = slave_st->user_mtu[port];
  1137. slave_st->user_mtu[port] = user_mtu;
  1138. if (user_mtu > master->max_user_mtu[port])
  1139. master->max_user_mtu[port] = user_mtu;
  1140. if (user_mtu < prev_user_mtu &&
  1141. prev_user_mtu == master->max_user_mtu[port]) {
  1142. int i;
  1143. slave_st->user_mtu[port] = user_mtu;
  1144. master->max_user_mtu[port] = user_mtu;
  1145. for (i = 0; i < dev->num_slaves; i++)
  1146. master->max_user_mtu[port] =
  1147. max_t(u16, master->max_user_mtu[port],
  1148. master->slave_state[i].user_mtu[port]);
  1149. }
  1150. gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]);
  1151. }
  1152. static void
  1153. mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave,
  1154. struct mlx4_set_port_general_context *gen_context)
  1155. {
  1156. struct mlx4_priv *priv = mlx4_priv(dev);
  1157. struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
  1158. /* Slave cannot change Global Pause configuration */
  1159. if (slave != mlx4_master_func_num(dev) &&
  1160. (gen_context->pptx != master->pptx ||
  1161. gen_context->pprx != master->pprx)) {
  1162. gen_context->pptx = master->pptx;
  1163. gen_context->pprx = master->pprx;
  1164. mlx4_warn(dev, "denying Global Pause change for slave:%d\n",
  1165. slave);
  1166. } else {
  1167. master->pptx = gen_context->pptx;
  1168. master->pprx = gen_context->pprx;
  1169. }
  1170. }
/* Master-side handler for a (possibly slave-issued) SET_PORT command.
 * For ETH ports (op_mod != 0) it validates/rewrites the mailbox per
 * in_modifier (RQP_CALC, GENERAL, GID_TABLE) before forwarding to FW.
 * For IB ports it aggregates the capability mask over all slaves and
 * handles the QKey-violation-counter reset bit.
 */
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
	int reset_qkey_viols;
	int port;
	int is_eth;
	int num_gids;
	int base;
	u32 in_modifier;
	u32 promisc;
	int err;
	int i, j;
	int offset;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	/* in_mod packs the sub-command in the high bits, port in the low byte */
	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations,
	 * except for changing MTU and USER_MTU.
	 */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL &&
		    in_modifier != MLX4_SET_PORT_GID_TABLE) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
				  slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			/* Force the master's base QPN into the context,
			 * preserving the caller's promisc bits.
			 */
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			/* Fold the slave's MTU/USER_MTU/pause requests into
			 * the port-wide values before programming FW.
			 */
			gen_context = inbox->buf;

			if (gen_context->flags & MLX4_FLAG_V_MTU_MASK)
				mlx4_en_set_port_mtu(dev, slave, port,
						     gen_context);

			if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK)
				mlx4_en_set_port_user_mtu(dev, slave, port,
							  gen_context);

			if (gen_context->flags &
			    (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK))
				mlx4_en_set_port_global_pause(dev, slave,
							      gen_context);

			break;
		case MLX4_SET_PORT_GID_TABLE:
			/* change to MULTIPLE entries: number of guest's gids
			 * need a FOR-loop here over number of gids the guest has.
			 * 1. Check no duplicates in gids passed by slave
			 */
			num_gids = mlx4_get_slave_num_gids(dev, slave, port);
			base = mlx4_get_base_gid_ix(dev, slave, port);
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
				if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
					    sizeof(zgid_entry)))
					continue;
				gid_entry_mb1 = gid_entry_mbox + 1;
				for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
					if (!memcmp(gid_entry_mb1->raw,
						    zgid_entry.raw, sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
						    sizeof(gid_entry_mbox->raw))) {
						/* found duplicate */
						return -EINVAL;
					}
				}
			}

			/* 2. Check that do not have duplicates in OTHER
			 *    entries in the port GID table
			 */
			mutex_lock(&(priv->port[port].gid_table.mutex));
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
				if (i >= base && i < base + num_gids)
					continue; /* don't compare to slave's current gids */
				gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
				if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
					continue;
				gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
				for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
					if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
						    sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
						    sizeof(gid_entry_tbl->raw))) {
						/* found duplicate */
						mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
							  slave, i);
						mutex_unlock(&(priv->port[port].gid_table.mutex));
						return -EINVAL;
					}
				}
			}

			/* insert slave GIDs with memcpy, starting at slave's base index */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
				memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
				       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);

			/* Now, copy roce port gids table to current mailbox for passing to FW */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
				memcpy(gid_entry_mbox->raw,
				       priv->port[port].gid_table.roce_gids[i].raw,
				       MLX4_ROCE_GID_ENTRY_SIZE);

			/* FW command must run while still holding the
			 * gid_table mutex so table and HW stay in sync.
			 */
			err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				       MLX4_CMD_NATIVE);
			mutex_unlock(&(priv->port[port].gid_table.mutex));
			return err;
		}

		return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* Slaves are not allowed to SET_PORT beacon (LED) blink */
	if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
		mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
		return -EPERM;
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violatin counter - reset according to each request.
	 */
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* slave may not set the IS_SM capability for the port */
	if (slave != mlx4_master_func_num(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
		return -EINVAL;

	/* No DEV_MGMT in multifunc mode */
	if (mlx4_is_mfunc(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
		return -EINVAL;

	/* Save the old mask so it can be restored if the FW command fails */
	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear mailbox for guests.  Master may be setting
	 * MTU or PKEY table size
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		/* Roll back this slave's recorded capability mask */
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}
  1358. int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
  1359. struct mlx4_vhcr *vhcr,
  1360. struct mlx4_cmd_mailbox *inbox,
  1361. struct mlx4_cmd_mailbox *outbox,
  1362. struct mlx4_cmd_info *cmd)
  1363. {
  1364. int port = mlx4_slave_convert_port(
  1365. dev, slave, vhcr->in_modifier & 0xFF);
  1366. if (port < 0)
  1367. return -EINVAL;
  1368. vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
  1369. (port & 0xFF);
  1370. return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
  1371. vhcr->op_modifier, inbox);
  1372. }
/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP	     = 4,  /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	     = 12, /* bits 15:12 */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20, /* "change pkey table size" flag */
	MLX4_CHANGE_PORT_VL_CAP	     = 21, /* "change VL cap" flag */
	MLX4_CHANGE_PORT_MTU_CAP     = 22, /* "change MTU cap" flag */
};
/* Configure an IB port's MTU cap, VL cap and (master only, when
 * @pkey_tbl_sz >= 0) the PKEY table size via SET_PORT.  Retries with a
 * halved VL cap (8, 4, 2, 1) for as long as FW returns -ENOMEM.
 * ETH ports are a no-op returning 0.
 */
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err, vl_cap, pkey_tbl_flag = 0;

	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
		return 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];

	if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
		pkey_tbl_flag = 1;
		((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
	}

	/* IB VL CAP enum isn't used by the firmware, just numerical values */
	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
			(1 << MLX4_CHANGE_PORT_VL_CAP) |
			(pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
			(vl_cap << MLX4_SET_PORT_VL_CAP));
		err = mlx4_cmd(dev, mailbox->dma, port,
			       MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
		/* Any result other than -ENOMEM (success or hard error)
		 * ends the retry loop.
		 */
		if (err != -ENOMEM)
			break;
	}
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
/* SET_PORT general-context bits used to select RoCE v1/v2 mode
 * (see mlx4_SET_PORT_general() below for usage)
 */
#define SET_PORT_ROCE_2_FLAGS          0x10
#define MLX4_SET_PORT_ROCE_V1_V2       0x2
  1414. int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
  1415. u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
  1416. {
  1417. struct mlx4_cmd_mailbox *mailbox;
  1418. struct mlx4_set_port_general_context *context;
  1419. int err;
  1420. u32 in_mod;
  1421. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1422. if (IS_ERR(mailbox))
  1423. return PTR_ERR(mailbox);
  1424. context = mailbox->buf;
  1425. context->flags = SET_PORT_GEN_ALL_VALID;
  1426. context->mtu = cpu_to_be16(mtu);
  1427. context->pptx = (pptx * (!pfctx)) << 7;
  1428. context->pfctx = pfctx;
  1429. context->pprx = (pprx * (!pfcrx)) << 7;
  1430. context->pfcrx = pfcrx;
  1431. if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
  1432. context->flags |= SET_PORT_ROCE_2_FLAGS;
  1433. context->roce_mode |=
  1434. MLX4_SET_PORT_ROCE_V1_V2 << 4;
  1435. }
  1436. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  1437. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1438. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1439. MLX4_CMD_WRAPPED);
  1440. mlx4_free_cmd_mailbox(dev, mailbox);
  1441. return err;
  1442. }
  1443. EXPORT_SYMBOL(mlx4_SET_PORT_general);
  1444. int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
  1445. u8 promisc)
  1446. {
  1447. struct mlx4_cmd_mailbox *mailbox;
  1448. struct mlx4_set_port_rqp_calc_context *context;
  1449. int err;
  1450. u32 in_mod;
  1451. u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
  1452. MCAST_DIRECT : MCAST_DEFAULT;
  1453. if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
  1454. return 0;
  1455. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1456. if (IS_ERR(mailbox))
  1457. return PTR_ERR(mailbox);
  1458. context = mailbox->buf;
  1459. context->base_qpn = cpu_to_be32(base_qpn);
  1460. context->n_mac = dev->caps.log_num_macs;
  1461. context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
  1462. base_qpn);
  1463. context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
  1464. base_qpn);
  1465. context->intra_no_vlan = 0;
  1466. context->no_vlan = MLX4_NO_VLAN_IDX;
  1467. context->intra_vlan_miss = 0;
  1468. context->vlan_miss = MLX4_VLAN_MISS_IDX;
  1469. in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
  1470. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1471. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1472. MLX4_CMD_WRAPPED);
  1473. mlx4_free_cmd_mailbox(dev, mailbox);
  1474. return err;
  1475. }
  1476. EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
  1477. int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu)
  1478. {
  1479. struct mlx4_cmd_mailbox *mailbox;
  1480. struct mlx4_set_port_general_context *context;
  1481. u32 in_mod;
  1482. int err;
  1483. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1484. if (IS_ERR(mailbox))
  1485. return PTR_ERR(mailbox);
  1486. context = mailbox->buf;
  1487. context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK;
  1488. context->user_mtu = cpu_to_be16(user_mtu);
  1489. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  1490. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1491. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1492. MLX4_CMD_WRAPPED);
  1493. mlx4_free_cmd_mailbox(dev, mailbox);
  1494. return err;
  1495. }
  1496. EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu);
  1497. int mlx4_SET_PORT_user_mac(struct mlx4_dev *dev, u8 port, u8 *user_mac)
  1498. {
  1499. struct mlx4_cmd_mailbox *mailbox;
  1500. struct mlx4_set_port_general_context *context;
  1501. u32 in_mod;
  1502. int err;
  1503. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1504. if (IS_ERR(mailbox))
  1505. return PTR_ERR(mailbox);
  1506. context = mailbox->buf;
  1507. context->flags2 |= MLX4_FLAG2_V_USER_MAC_MASK;
  1508. memcpy(context->user_mac, user_mac, sizeof(context->user_mac));
  1509. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  1510. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1511. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1512. MLX4_CMD_NATIVE);
  1513. mlx4_free_cmd_mailbox(dev, mailbox);
  1514. return err;
  1515. }
  1516. EXPORT_SYMBOL(mlx4_SET_PORT_user_mac);
  1517. int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
  1518. {
  1519. struct mlx4_cmd_mailbox *mailbox;
  1520. struct mlx4_set_port_general_context *context;
  1521. u32 in_mod;
  1522. int err;
  1523. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1524. if (IS_ERR(mailbox))
  1525. return PTR_ERR(mailbox);
  1526. context = mailbox->buf;
  1527. context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK;
  1528. if (ignore_fcs_value)
  1529. context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
  1530. else
  1531. context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
  1532. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  1533. err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
  1534. MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
  1535. mlx4_free_cmd_mailbox(dev, mailbox);
  1536. return err;
  1537. }
  1538. EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
/* SET_PORT VXLAN context flag bits. VXLAN_ENABLE_MODIFY and
 * VXLAN_STEERING_MODIFY go into the modify_flags byte (select which
 * fields the FW should apply); VXLAN_ENABLE goes into the separate
 * enable_flags byte, so its value intentionally collides with
 * VXLAN_ENABLE_MODIFY (both 1 << 7) without conflict.
 */
enum {
	VXLAN_ENABLE_MODIFY	= 1 << 7,
	VXLAN_STEERING_MODIFY	= 1 << 6,

	VXLAN_ENABLE		= 1 << 7,
};
/* Mailbox layout for SET_PORT with the MLX4_SET_PORT_VXLAN opcode
 * modifier (see mlx4_SET_PORT_VXLAN below).
 */
struct mlx4_set_port_vxlan_context {
	u32	reserved1;
	u8	modify_flags;	/* VXLAN_*_MODIFY bits: fields FW should apply */
	u8	reserved2;
	u8	enable_flags;	/* VXLAN_ENABLE or 0 */
	u8	steering;	/* steering mode for VXLAN traffic */
};
  1551. int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
  1552. {
  1553. int err;
  1554. u32 in_mod;
  1555. struct mlx4_cmd_mailbox *mailbox;
  1556. struct mlx4_set_port_vxlan_context *context;
  1557. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1558. if (IS_ERR(mailbox))
  1559. return PTR_ERR(mailbox);
  1560. context = mailbox->buf;
  1561. memset(context, 0, sizeof(*context));
  1562. context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
  1563. if (enable)
  1564. context->enable_flags = VXLAN_ENABLE;
  1565. context->steering = steering;
  1566. in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
  1567. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1568. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1569. MLX4_CMD_NATIVE);
  1570. mlx4_free_cmd_mailbox(dev, mailbox);
  1571. return err;
  1572. }
  1573. EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
  1574. int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
  1575. {
  1576. int err;
  1577. struct mlx4_cmd_mailbox *mailbox;
  1578. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1579. if (IS_ERR(mailbox))
  1580. return PTR_ERR(mailbox);
  1581. *((__be32 *)mailbox->buf) = cpu_to_be32(time);
  1582. err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
  1583. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1584. MLX4_CMD_NATIVE);
  1585. mlx4_free_cmd_mailbox(dev, mailbox);
  1586. return err;
  1587. }
  1588. EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
  1589. int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
  1590. struct mlx4_vhcr *vhcr,
  1591. struct mlx4_cmd_mailbox *inbox,
  1592. struct mlx4_cmd_mailbox *outbox,
  1593. struct mlx4_cmd_info *cmd)
  1594. {
  1595. int err = 0;
  1596. return err;
  1597. }
  1598. int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
  1599. u64 mac, u64 clear, u8 mode)
  1600. {
  1601. return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
  1602. MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
  1603. MLX4_CMD_WRAPPED);
  1604. }
  1605. EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
  1606. int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
  1607. struct mlx4_vhcr *vhcr,
  1608. struct mlx4_cmd_mailbox *inbox,
  1609. struct mlx4_cmd_mailbox *outbox,
  1610. struct mlx4_cmd_info *cmd)
  1611. {
  1612. int err = 0;
  1613. return err;
  1614. }
  1615. int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
  1616. struct mlx4_vhcr *vhcr,
  1617. struct mlx4_cmd_mailbox *inbox,
  1618. struct mlx4_cmd_mailbox *outbox,
  1619. struct mlx4_cmd_info *cmd)
  1620. {
  1621. return 0;
  1622. }
/* Look up @gid in @port's RoCE GID table and translate the matching
 * entry index into the globally unique slave (function) number that
 * owns it.
 *
 * @dev: mlx4 device; must be multi-function, otherwise -EINVAL.
 * @port: physical port number (1-based).
 * @gid: raw GID, MLX4_ROCE_GID_ENTRY_SIZE bytes.
 * @slave_id: out - globally unique slave number (0 = PF).
 *
 * Returns 0 and sets *slave_id when the GID is found, -EINVAL otherwise.
 */
int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
				 int *slave_id)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, found_ix = -1;
	/* GID table entries left for VFs after the PF's reserved block */
	int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	struct mlx4_slaves_pport slaves_pport;
	unsigned num_vfs;
	int slave_gid;

	if (!mlx4_is_mfunc(dev))
		return -EINVAL;

	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	/* Subtract 1: the PF's bit is set in slaves_pport but it isn't a VF.
	 * NOTE(review): if no VFs are assigned to this port num_vfs is 0 and
	 * the (vf_gids % num_vfs) divisions below would divide by zero;
	 * presumably a match beyond the PF GIDs can't happen in that case -
	 * confirm.
	 */
	num_vfs = bitmap_weight(slaves_pport.slaves,
				dev->persist->num_vfs + 1) - 1;

	/* Linear scan of the port's GID table for an exact raw-GID match */
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
		if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
			    MLX4_ROCE_GID_ENTRY_SIZE)) {
			found_ix = i;
			break;
		}
	}

	if (found_ix >= 0) {
		/* Calculate a slave_gid which is the slave number in the gid
		 * table and not a globally unique slave number.
		 */
		if (found_ix < MLX4_ROCE_PF_GIDS)
			slave_gid = 0;
		else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
			 (vf_gids / num_vfs + 1))
			/* First (vf_gids % num_vfs) VFs get one extra GID each */
			slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
				     (vf_gids / num_vfs + 1)) + 1;
		else
			/* Remaining VFs get the base number of GIDs each */
			slave_gid =
			((found_ix - MLX4_ROCE_PF_GIDS -
			  ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
			 (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;

		/* Calculate the globally unique slave id */
		if (slave_gid) {
			struct mlx4_active_ports exclusive_ports;
			struct mlx4_active_ports actv_ports;
			struct mlx4_slaves_pport slaves_pport_actv;
			unsigned max_port_p_one;
			int num_vfs_before = 0;
			int candidate_slave_gid;

			/* Calculate how many VFs are on the previous port, if exists */
			for (i = 1; i < port; i++) {
				bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
				set_bit(i - 1, exclusive_ports.ports);
				slaves_pport_actv =
					mlx4_phys_to_slaves_pport_actv(
							dev, &exclusive_ports);
				num_vfs_before += bitmap_weight(
						slaves_pport_actv.slaves,
						dev->persist->num_vfs + 1);
			}

			/* candidate_slave_gid isn't necessarily the correct slave, but
			 * it has the same number of ports and is assigned to the same
			 * ports as the real slave we're looking for. On dual port VF,
			 * slave_gid = [single port VFs on port <port>] +
			 * [offset of the current slave from the first dual port VF] +
			 * 1 (for the PF).
			 */
			candidate_slave_gid = slave_gid + num_vfs_before;

			actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
			max_port_p_one = find_first_bit(
				actv_ports.ports, dev->caps.num_ports) +
				bitmap_weight(actv_ports.ports,
					      dev->caps.num_ports) + 1;

			/* Calculate the real slave number */
			for (i = 1; i < max_port_p_one; i++) {
				if (i == port)
					continue;
				bitmap_zero(exclusive_ports.ports,
					    dev->caps.num_ports);
				set_bit(i - 1, exclusive_ports.ports);
				slaves_pport_actv =
					mlx4_phys_to_slaves_pport_actv(
						dev, &exclusive_ports);
				slave_gid += bitmap_weight(
						slaves_pport_actv.slaves,
						dev->persist->num_vfs + 1);
			}
		}
		*slave_id = slave_gid;
	}

	return (found_ix >= 0) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
  1711. int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
  1712. u8 *gid)
  1713. {
  1714. struct mlx4_priv *priv = mlx4_priv(dev);
  1715. if (!mlx4_is_master(dev))
  1716. return -EINVAL;
  1717. memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
  1718. MLX4_ROCE_GID_ENTRY_SIZE);
  1719. return 0;
  1720. }
  1721. EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
/* Cable Module Info */
#define MODULE_INFO_MAX_READ 48		/* max eeprom bytes per MAD read */

#define I2C_ADDR_LOW  0x50		/* i2c slave addr of eeprom low pages */
#define I2C_ADDR_HIGH 0x51		/* i2c slave addr of eeprom high pages */
#define I2C_PAGE_SIZE 256		/* bytes per eeprom page */

/* Module Info Data */
/* Payload layout inside the MAD for the Module Info attribute (0xFF60):
 * request fields are filled by the host, the data array is returned by
 * firmware. All multi-byte fields are big-endian on the wire.
 */
struct mlx4_cable_info {
	u8	i2c_addr;		/* I2C_ADDR_LOW or I2C_ADDR_HIGH */
	u8	page_num;		/* eeprom page to read */
	__be16	dev_mem_address;	/* byte offset within the page */
	__be16	reserved1;
	__be16	size;			/* number of bytes to read */
	__be32	reserved2[2];
	u8	data[MODULE_INFO_MAX_READ];	/* returned eeprom bytes */
};
/* Error codes returned in the high byte of the Module Info MAD status;
 * decoded by cable_info_mad_err_str() below.
 */
enum cable_info_err {
	CABLE_INF_INV_PORT	= 0x1,	/* invalid port */
	CABLE_INF_OP_NOSUP	= 0x2,	/* operation not supported on port */
	CABLE_INF_NOT_CONN	= 0x3,	/* no cable connected */
	CABLE_INF_NO_EEPRM	= 0x4,	/* cable has no eeprom */
	CABLE_INF_PAGE_ERR	= 0x5,	/* page number out of range */
	CABLE_INF_INV_ADDR	= 0x6,	/* bad device address/size */
	CABLE_INF_I2C_ADDR	= 0x7,	/* bad i2c slave address */
	CABLE_INF_QSFP_VIO	= 0x8,	/* QSFP spec violation on bus */
	CABLE_INF_I2C_BUSY	= 0x9,	/* i2c bus constantly busy */
};
  1748. #define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
  1749. static inline const char *cable_info_mad_err_str(u16 mad_status)
  1750. {
  1751. u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
  1752. switch (err) {
  1753. case CABLE_INF_INV_PORT:
  1754. return "invalid port selected";
  1755. case CABLE_INF_OP_NOSUP:
  1756. return "operation not supported for this port (the port is of type CX4 or internal)";
  1757. case CABLE_INF_NOT_CONN:
  1758. return "cable is not connected";
  1759. case CABLE_INF_NO_EEPRM:
  1760. return "the connected cable has no EPROM (passive copper cable)";
  1761. case CABLE_INF_PAGE_ERR:
  1762. return "page number is greater than 15";
  1763. case CABLE_INF_INV_ADDR:
  1764. return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
  1765. case CABLE_INF_I2C_ADDR:
  1766. return "invalid I2C slave address";
  1767. case CABLE_INF_QSFP_VIO:
  1768. return "at least one cable violates the QSFP specification and ignores the modsel signal";
  1769. case CABLE_INF_I2C_BUSY:
  1770. return "I2C bus is constantly busy";
  1771. }
  1772. return "Unknown Error";
  1773. }
/**
 * mlx4_get_module_info - Read cable module eeprom data
 * @dev: mlx4_dev.
 * @port: port number.
 * @offset: byte offset in eeprom to start reading data from.
 * @size: num of bytes to read.
 * @data: output buffer to put the requested data into.
 *
 * Reads cable module eeprom data, puts the outcome data into
 * data pointer paramer.
 * Returns num of read bytes on success or a negative error
 * code.
 */
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
			 u16 offset, u16 size, u8 *data)
{
	struct mlx4_cmd_mailbox *inbox, *outbox;
	struct mlx4_mad_ifc *inmad, *outmad;
	struct mlx4_cable_info *cable_info;
	u16 i2c_addr;
	int ret;

	/* A single MAD can return at most MODULE_INFO_MAX_READ bytes */
	if (size > MODULE_INFO_MAX_READ)
		size = MODULE_INFO_MAX_READ;

	inbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inbox))
		return PTR_ERR(inbox);

	outbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outbox)) {
		mlx4_free_cmd_mailbox(dev, inbox);
		return PTR_ERR(outbox);
	}

	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
	outmad = (struct mlx4_mad_ifc *)(outbox->buf);

	/* Build a SMP Get for the vendor Module Info attribute */
	inmad->method = 0x1; /* Get */
	inmad->class_version = 0x1;
	inmad->mgmt_class = 0x1;
	inmad->base_version = 0x1;
	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */

	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
		/* Cross pages reads are not allowed
		 * read until offset 256 in low page
		 */
		size -= offset + size - I2C_PAGE_SIZE;

	/* NOTE(review): only the low i2c address / page 0 is ever selected
	 * here, so the I2C_ADDR_HIGH branch in the error path below cannot
	 * fire in this version - presumably kept for a future high-page
	 * read path; confirm.
	 */
	i2c_addr = I2C_ADDR_LOW;

	cable_info = (struct mlx4_cable_info *)inmad->data;
	cable_info->dev_mem_address = cpu_to_be16(offset);
	cable_info->page_num = 0;
	cable_info->i2c_addr = i2c_addr;
	cable_info->size = cpu_to_be16(size);

	/* op_mod 3: MAD_IFC with MKey/network-view checks as used here */
	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (ret)
		goto out;

	if (be16_to_cpu(outmad->status)) {
		/* Mad returned with bad status */
		ret = be16_to_cpu(outmad->status);
		mlx4_warn(dev,
			  "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
			  0xFF60, port, i2c_addr, offset, size,
			  ret, cable_info_mad_err_str(ret));

		if (i2c_addr == I2C_ADDR_HIGH &&
		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
			/* Some SFP cables do not support i2c slave
			 * address 0x51 (high page), abort silently.
			 */
			ret = 0;
		else
			ret = -ret;
		goto out;
	}
	cable_info = (struct mlx4_cable_info *)outmad->data;
	memcpy(data, cable_info->data, size);
	ret = size;
out:
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);
  1854. int mlx4_max_tc(struct mlx4_dev *dev)
  1855. {
  1856. u8 num_tc = dev->caps.max_tc_eth;
  1857. if (!num_tc)
  1858. num_tc = MLX4_TC_MAX_NUMBER;
  1859. return num_tc;
  1860. }
  1861. EXPORT_SYMBOL(mlx4_max_tc);