/* drivers/net/ethernet/mellanox/mlx4/port.c */
  1. /*
  2. * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #include <linux/errno.h>
  33. #include <linux/if_ether.h>
  34. #include <linux/if_vlan.h>
  35. #include <linux/export.h>
  36. #include <linux/mlx4/cmd.h>
  37. #include "mlx4.h"
  38. #include "mlx4_stats.h"
  39. #define MLX4_MAC_VALID (1ull << 63)
  40. #define MLX4_VLAN_VALID (1u << 31)
  41. #define MLX4_VLAN_MASK 0xfff
  42. #define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
  43. #define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
  44. #define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
  45. #define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
  46. #define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
  47. #define MLX4_IGNORE_FCS_MASK 0x1
  48. #define MLX4_TC_MAX_NUMBER 8
  49. void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
  50. {
  51. int i;
  52. mutex_init(&table->mutex);
  53. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  54. table->entries[i] = 0;
  55. table->refs[i] = 0;
  56. table->is_dup[i] = false;
  57. }
  58. table->max = 1 << dev->caps.log_num_macs;
  59. table->total = 0;
  60. }
  61. void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
  62. {
  63. int i;
  64. mutex_init(&table->mutex);
  65. for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
  66. table->entries[i] = 0;
  67. table->refs[i] = 0;
  68. table->is_dup[i] = false;
  69. }
  70. table->max = (1 << dev->caps.log_num_vlans) - MLX4_VLAN_REGULAR;
  71. table->total = 0;
  72. }
  73. void mlx4_init_roce_gid_table(struct mlx4_dev *dev,
  74. struct mlx4_roce_gid_table *table)
  75. {
  76. int i;
  77. mutex_init(&table->mutex);
  78. for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++)
  79. memset(table->roce_gids[i].raw, 0, MLX4_ROCE_GID_ENTRY_SIZE);
  80. }
  81. static int validate_index(struct mlx4_dev *dev,
  82. struct mlx4_mac_table *table, int index)
  83. {
  84. int err = 0;
  85. if (index < 0 || index >= table->max || !table->entries[index]) {
  86. mlx4_warn(dev, "No valid Mac entry for the given index\n");
  87. err = -EINVAL;
  88. }
  89. return err;
  90. }
  91. static int find_index(struct mlx4_dev *dev,
  92. struct mlx4_mac_table *table, u64 mac)
  93. {
  94. int i;
  95. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  96. if (table->refs[i] &&
  97. (MLX4_MAC_MASK & mac) ==
  98. (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
  99. return i;
  100. }
  101. /* Mac not found */
  102. return -EINVAL;
  103. }
  104. static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
  105. __be64 *entries)
  106. {
  107. struct mlx4_cmd_mailbox *mailbox;
  108. u32 in_mod;
  109. int err;
  110. mailbox = mlx4_alloc_cmd_mailbox(dev);
  111. if (IS_ERR(mailbox))
  112. return PTR_ERR(mailbox);
  113. memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
  114. in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
  115. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  116. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  117. MLX4_CMD_NATIVE);
  118. mlx4_free_cmd_mailbox(dev, mailbox);
  119. return err;
  120. }
  121. int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
  122. {
  123. struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
  124. struct mlx4_mac_table *table = &info->mac_table;
  125. int i;
  126. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  127. if (!table->refs[i])
  128. continue;
  129. if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
  130. *idx = i;
  131. return 0;
  132. }
  133. }
  134. return -ENOENT;
  135. }
  136. EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
  137. static bool mlx4_need_mf_bond(struct mlx4_dev *dev)
  138. {
  139. int i, num_eth_ports = 0;
  140. if (!mlx4_is_mfunc(dev))
  141. return false;
  142. mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
  143. ++num_eth_ports;
  144. return (num_eth_ports == 2) ? true : false;
  145. }
/* Add @mac to the MAC table of @port (native/PF path).
 *
 * Returns the table index (>= 0) on success or a negative errno.
 * If the MAC is already present its reference count is bumped and the
 * existing index is returned.
 *
 * When the device is (or may become) multi-function bonded, the entry is
 * mirrored into the other port's table at the same index; both tables are
 * then locked in a fixed order (port 1 first) to avoid deadlock.
 */
int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int i, err = 0;
	int free = -1;			/* first free slot in this port's table */
	int free_for_dup = -1;		/* slot free in BOTH tables, for mirroring */
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
	bool need_mf_bond = mlx4_need_mf_bond(dev);
	bool can_mf_bond = true;

	mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d %s duplicate\n",
		 (unsigned long long)mac, port,
		 dup ? "with" : "without");

	/* Always take port 1's lock before port 2's, regardless of @port,
	 * so concurrent registrations on both ports cannot deadlock. */
	if (need_mf_bond) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	if (need_mf_bond) {
		int index_at_port = -1;
		int index_at_dup_port = -1;

		/* Locate @mac in both tables (if present at all). */
		for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))))
				index_at_port = i;
			if (((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]))))
				index_at_dup_port = i;
		}

		/* check that same mac is not in the tables at different indices */
		if ((index_at_port != index_at_dup_port) &&
		    (index_at_port >= 0) &&
		    (index_at_dup_port >= 0))
			can_mf_bond = false;

		/* If the mac is already in the primary table, the slot must be
		 * available in the duplicate table as well.
		 */
		if (index_at_port >= 0 && index_at_dup_port < 0 &&
		    dup_table->refs[index_at_port]) {
			can_mf_bond = false;
		}

		/* If the mac is already in the duplicate table, check that the
		 * corresponding index is not occupied in the primary table, or
		 * the primary table already contains the mac at the same index.
		 * Otherwise, you cannot bond (primary contains a different mac
		 * at that index).
		 */
		if (index_at_dup_port >= 0) {
			if (!table->refs[index_at_dup_port] ||
			    ((MLX4_MAC_MASK & mac) == (MLX4_MAC_MASK & be64_to_cpu(table->entries[index_at_dup_port]))))
				free_for_dup = index_at_dup_port;
			else
				can_mf_bond = false;
		}
	}

	/* Scan: find a free slot, and bail out early if @mac is already
	 * registered (refcount bump only). */
	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
		if (!table->refs[i]) {
			if (free < 0)
				free = i;
			if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
				if (!dup_table->refs[i])
					free_for_dup = i;
			}
			continue;
		}

		if ((MLX4_MAC_MASK & mac) ==
		    (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
			/* MAC already registered, increment ref count */
			err = i;
			++table->refs[i];
			if (dup) {
				/* Sanity-check the mirrored entry on the other port. */
				u64 dup_mac = MLX4_MAC_MASK & be64_to_cpu(dup_table->entries[i]);

				if (dup_mac != mac || !dup_table->is_dup[i]) {
					mlx4_warn(dev, "register mac: expect duplicate mac 0x%llx on port %d index %d\n",
						  mac, dup_port, i);
				}
			}
			goto out;
		}
	}

	/* No common free slot in both tables: fall back to non-mirrored
	 * registration and warn that HA may be degraded. */
	if (need_mf_bond && (free_for_dup < 0)) {
		if (dup) {
			mlx4_warn(dev, "Fail to allocate duplicate MAC table entry\n");
			mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
			dup = false;
		}
		can_mf_bond = false;
	}

	if (need_mf_bond && can_mf_bond)
		free = free_for_dup;

	mlx4_dbg(dev, "Free MAC index is %d\n", free);

	if (table->total == table->max) {
		/* No free mac entries */
		err = -ENOSPC;
		goto out;
	}

	/* Register new MAC */
	table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) mac);
		/* roll back the in-memory entry on firmware failure */
		table->entries[free] = 0;
		goto out;
	}
	table->refs[free] = 1;
	table->is_dup[free] = false;
	++table->total;
	if (dup) {
		/* Mirror into the bonded port's table: zero refs, flagged
		 * as a duplicate so unregister knows who owns it. */
		dup_table->refs[free] = 0;
		dup_table->is_dup[free] = true;
		dup_table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);

		err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
		if (unlikely(err)) {
			mlx4_warn(dev, "Failed adding duplicate mac: 0x%llx\n", mac);
			dup_table->is_dup[free] = false;
			dup_table->entries[free] = 0;
			goto out;
		}
		++dup_table->total;
	}
	err = free;
out:
	/* Unlock in reverse of the acquisition order above. */
	if (need_mf_bond) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_register_mac);
/* Register @mac on @port.
 * On a multi-function device the request is forwarded via ALLOC_RES; if
 * firmware rejects the new opcode layout with -EINVAL on a slave, retry
 * once with the legacy REG_MAC layout and latch MLX4_FLAG_OLD_REG_MAC so
 * later calls use it directly.
 * Returns the allocated table index (>= 0) or a negative errno.
 */
int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
	u64 out_param = 0;
	int err = -EINVAL;

	if (mlx4_is_mfunc(dev)) {
		if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
			err = mlx4_cmd_imm(dev, mac, &out_param,
					   ((u32) port) << 8 | (u32) RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
		}
		if (err && err == -EINVAL && mlx4_is_slave(dev)) {
			/* retry using old REG_MAC format */
			set_param_l(&out_param, port);
			err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
					   RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
					   MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
			if (!err)
				dev->flags |= MLX4_FLAG_OLD_REG_MAC;
		}
		if (err)
			return err;

		/* table index is returned in the low dword of out_param */
		return get_param_l(&out_param);
	}
	return __mlx4_register_mac(dev, port, mac);
}
EXPORT_SYMBOL_GPL(mlx4_register_mac);
  316. int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port)
  317. {
  318. return dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
  319. (port - 1) * (1 << dev->caps.log_num_macs);
  320. }
  321. EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
  322. void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
  323. {
  324. struct mlx4_port_info *info;
  325. struct mlx4_mac_table *table;
  326. int index;
  327. bool dup = mlx4_is_mf_bonded(dev);
  328. u8 dup_port = (port == 1) ? 2 : 1;
  329. struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;
  330. if (port < 1 || port > dev->caps.num_ports) {
  331. mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
  332. return;
  333. }
  334. info = &mlx4_priv(dev)->port[port];
  335. table = &info->mac_table;
  336. if (dup) {
  337. if (port == 1) {
  338. mutex_lock(&table->mutex);
  339. mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
  340. } else {
  341. mutex_lock(&dup_table->mutex);
  342. mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
  343. }
  344. } else {
  345. mutex_lock(&table->mutex);
  346. }
  347. index = find_index(dev, table, mac);
  348. if (validate_index(dev, table, index))
  349. goto out;
  350. if (--table->refs[index] || table->is_dup[index]) {
  351. mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
  352. index);
  353. if (!table->refs[index])
  354. dup_table->is_dup[index] = false;
  355. goto out;
  356. }
  357. table->entries[index] = 0;
  358. if (mlx4_set_port_mac_table(dev, port, table->entries))
  359. mlx4_warn(dev, "Fail to set mac in port %d during unregister\n", port);
  360. --table->total;
  361. if (dup) {
  362. dup_table->is_dup[index] = false;
  363. if (dup_table->refs[index])
  364. goto out;
  365. dup_table->entries[index] = 0;
  366. if (mlx4_set_port_mac_table(dev, dup_port, dup_table->entries))
  367. mlx4_warn(dev, "Fail to set mac in duplicate port %d during unregister\n", dup_port);
  368. --table->total;
  369. }
  370. out:
  371. if (dup) {
  372. if (port == 2) {
  373. mutex_unlock(&table->mutex);
  374. mutex_unlock(&dup_table->mutex);
  375. } else {
  376. mutex_unlock(&dup_table->mutex);
  377. mutex_unlock(&table->mutex);
  378. }
  379. } else {
  380. mutex_unlock(&table->mutex);
  381. }
  382. }
  383. EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
  384. void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
  385. {
  386. u64 out_param = 0;
  387. if (mlx4_is_mfunc(dev)) {
  388. if (!(dev->flags & MLX4_FLAG_OLD_REG_MAC)) {
  389. (void) mlx4_cmd_imm(dev, mac, &out_param,
  390. ((u32) port) << 8 | (u32) RES_MAC,
  391. RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
  392. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  393. } else {
  394. /* use old unregister mac format */
  395. set_param_l(&out_param, port);
  396. (void) mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
  397. RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
  398. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  399. }
  400. return;
  401. }
  402. __mlx4_unregister_mac(dev, port, mac);
  403. return;
  404. }
  405. EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
/* Swap the MAC backing QP @qpn on @port for @new_mac (native/PF path).
 * The table slot is derived from the QP number (qpn - base_qpn), relying
 * on the per-MAC QP mapping established at registration time.
 * Returns 0 on success or a negative errno; the in-memory entry is rolled
 * back (zeroed) if the firmware update fails.
 */
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	struct mlx4_mac_table *table = &info->mac_table;
	int index = qpn - info->base_qpn;
	int err = 0;
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_mac_table *dup_table = &mlx4_priv(dev)->port[dup_port].mac_table;

	/* CX1 doesn't support multi-functions */
	/* Fixed lock order (port 1's table first) when both tables are held. */
	if (dup) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	err = validate_index(dev, table, index);
	if (err)
		goto out;

	table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

	err = mlx4_set_port_mac_table(dev, port, table->entries);
	if (unlikely(err)) {
		mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
			 (unsigned long long) new_mac);
		table->entries[index] = 0;
	} else {
		if (dup) {
			/* Mirror the change into the bonded port's table. */
			dup_table->entries[index] = cpu_to_be64(new_mac | MLX4_MAC_VALID);

			err = mlx4_set_port_mac_table(dev, dup_port, dup_table->entries);
			if (unlikely(err)) {
				mlx4_err(dev, "Failed adding duplicate MAC: 0x%llx\n",
					 (unsigned long long)new_mac);
				dup_table->entries[index] = 0;
			}
		}
	}
out:
	if (dup) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
	return err;
}
EXPORT_SYMBOL_GPL(__mlx4_replace_mac);
  462. static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
  463. __be32 *entries)
  464. {
  465. struct mlx4_cmd_mailbox *mailbox;
  466. u32 in_mod;
  467. int err;
  468. mailbox = mlx4_alloc_cmd_mailbox(dev);
  469. if (IS_ERR(mailbox))
  470. return PTR_ERR(mailbox);
  471. memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
  472. in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
  473. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  474. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  475. MLX4_CMD_NATIVE);
  476. mlx4_free_cmd_mailbox(dev, mailbox);
  477. return err;
  478. }
  479. int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
  480. {
  481. struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
  482. int i;
  483. for (i = 0; i < MLX4_MAX_VLAN_NUM; ++i) {
  484. if (table->refs[i] &&
  485. (vid == (MLX4_VLAN_MASK &
  486. be32_to_cpu(table->entries[i])))) {
  487. /* VLAN already registered, increase reference count */
  488. *idx = i;
  489. return 0;
  490. }
  491. }
  492. return -ENOENT;
  493. }
  494. EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
  495. int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
  496. int *index)
  497. {
  498. struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
  499. int i, err = 0;
  500. int free = -1;
  501. int free_for_dup = -1;
  502. bool dup = mlx4_is_mf_bonded(dev);
  503. u8 dup_port = (port == 1) ? 2 : 1;
  504. struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;
  505. bool need_mf_bond = mlx4_need_mf_bond(dev);
  506. bool can_mf_bond = true;
  507. mlx4_dbg(dev, "Registering VLAN: %d for port %d %s duplicate\n",
  508. vlan, port,
  509. dup ? "with" : "without");
  510. if (need_mf_bond) {
  511. if (port == 1) {
  512. mutex_lock(&table->mutex);
  513. mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
  514. } else {
  515. mutex_lock(&dup_table->mutex);
  516. mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
  517. }
  518. } else {
  519. mutex_lock(&table->mutex);
  520. }
  521. if (table->total == table->max) {
  522. /* No free vlan entries */
  523. err = -ENOSPC;
  524. goto out;
  525. }
  526. if (need_mf_bond) {
  527. int index_at_port = -1;
  528. int index_at_dup_port = -1;
  529. for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
  530. if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(table->entries[i]))))
  531. index_at_port = i;
  532. if ((vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]))))
  533. index_at_dup_port = i;
  534. }
  535. /* check that same vlan is not in the tables at different indices */
  536. if ((index_at_port != index_at_dup_port) &&
  537. (index_at_port >= 0) &&
  538. (index_at_dup_port >= 0))
  539. can_mf_bond = false;
  540. /* If the vlan is already in the primary table, the slot must be
  541. * available in the duplicate table as well.
  542. */
  543. if (index_at_port >= 0 && index_at_dup_port < 0 &&
  544. dup_table->refs[index_at_port]) {
  545. can_mf_bond = false;
  546. }
  547. /* If the vlan is already in the duplicate table, check that the
  548. * corresponding index is not occupied in the primary table, or
  549. * the primary table already contains the vlan at the same index.
  550. * Otherwise, you cannot bond (primary contains a different vlan
  551. * at that index).
  552. */
  553. if (index_at_dup_port >= 0) {
  554. if (!table->refs[index_at_dup_port] ||
  555. (vlan == (MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[index_at_dup_port]))))
  556. free_for_dup = index_at_dup_port;
  557. else
  558. can_mf_bond = false;
  559. }
  560. }
  561. for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
  562. if (!table->refs[i]) {
  563. if (free < 0)
  564. free = i;
  565. if (free_for_dup < 0 && need_mf_bond && can_mf_bond) {
  566. if (!dup_table->refs[i])
  567. free_for_dup = i;
  568. }
  569. }
  570. if ((table->refs[i] || table->is_dup[i]) &&
  571. (vlan == (MLX4_VLAN_MASK &
  572. be32_to_cpu(table->entries[i])))) {
  573. /* Vlan already registered, increase references count */
  574. mlx4_dbg(dev, "vlan %u is already registered.\n", vlan);
  575. *index = i;
  576. ++table->refs[i];
  577. if (dup) {
  578. u16 dup_vlan = MLX4_VLAN_MASK & be32_to_cpu(dup_table->entries[i]);
  579. if (dup_vlan != vlan || !dup_table->is_dup[i]) {
  580. mlx4_warn(dev, "register vlan: expected duplicate vlan %u on port %d index %d\n",
  581. vlan, dup_port, i);
  582. }
  583. }
  584. goto out;
  585. }
  586. }
  587. if (need_mf_bond && (free_for_dup < 0)) {
  588. if (dup) {
  589. mlx4_warn(dev, "Fail to allocate duplicate VLAN table entry\n");
  590. mlx4_warn(dev, "High Availability for virtual functions may not work as expected\n");
  591. dup = false;
  592. }
  593. can_mf_bond = false;
  594. }
  595. if (need_mf_bond && can_mf_bond)
  596. free = free_for_dup;
  597. if (free < 0) {
  598. err = -ENOMEM;
  599. goto out;
  600. }
  601. /* Register new VLAN */
  602. table->refs[free] = 1;
  603. table->is_dup[free] = false;
  604. table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
  605. err = mlx4_set_port_vlan_table(dev, port, table->entries);
  606. if (unlikely(err)) {
  607. mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
  608. table->refs[free] = 0;
  609. table->entries[free] = 0;
  610. goto out;
  611. }
  612. ++table->total;
  613. if (dup) {
  614. dup_table->refs[free] = 0;
  615. dup_table->is_dup[free] = true;
  616. dup_table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
  617. err = mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries);
  618. if (unlikely(err)) {
  619. mlx4_warn(dev, "Failed adding duplicate vlan: %u\n", vlan);
  620. dup_table->is_dup[free] = false;
  621. dup_table->entries[free] = 0;
  622. goto out;
  623. }
  624. ++dup_table->total;
  625. }
  626. *index = free;
  627. out:
  628. if (need_mf_bond) {
  629. if (port == 2) {
  630. mutex_unlock(&table->mutex);
  631. mutex_unlock(&dup_table->mutex);
  632. } else {
  633. mutex_unlock(&dup_table->mutex);
  634. mutex_unlock(&table->mutex);
  635. }
  636. } else {
  637. mutex_unlock(&table->mutex);
  638. }
  639. return err;
  640. }
  641. int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
  642. {
  643. u64 out_param = 0;
  644. int err;
  645. if (vlan > 4095)
  646. return -EINVAL;
  647. if (mlx4_is_mfunc(dev)) {
  648. err = mlx4_cmd_imm(dev, vlan, &out_param,
  649. ((u32) port) << 8 | (u32) RES_VLAN,
  650. RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
  651. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
  652. if (!err)
  653. *index = get_param_l(&out_param);
  654. return err;
  655. }
  656. return __mlx4_register_vlan(dev, port, vlan, index);
  657. }
  658. EXPORT_SYMBOL_GPL(mlx4_register_vlan);
/* Release one reference to @vlan on @port (native/PF path).
 * When the last reference drops, the slot is cleared and the table pushed
 * to firmware; on a mf-bonded device the mirrored entry on the other port
 * is cleared too, unless that port holds its own references.
 * Reserved slots (below MLX4_VLAN_REGULAR) are never freed here.
 */
void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
{
	struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
	int index;
	bool dup = mlx4_is_mf_bonded(dev);
	u8 dup_port = (port == 1) ? 2 : 1;
	struct mlx4_vlan_table *dup_table = &mlx4_priv(dev)->port[dup_port].vlan_table;

	/* Fixed lock order (port 1's table first) to avoid AB/BA deadlock. */
	if (dup) {
		if (port == 1) {
			mutex_lock(&table->mutex);
			mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&dup_table->mutex);
			mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
		}
	} else {
		mutex_lock(&table->mutex);
	}

	if (mlx4_find_cached_vlan(dev, port, vlan, &index)) {
		mlx4_warn(dev, "vlan 0x%x is not in the vlan table\n", vlan);
		goto out;
	}

	if (index < MLX4_VLAN_REGULAR) {
		mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
		goto out;
	}

	/* Keep the entry while other users, or the bonded mirror, need it. */
	if (--table->refs[index] || table->is_dup[index]) {
		mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
			 table->refs[index], index);
		if (!table->refs[index])
			dup_table->is_dup[index] = false;
		goto out;
	}
	table->entries[index] = 0;
	if (mlx4_set_port_vlan_table(dev, port, table->entries))
		mlx4_warn(dev, "Fail to set vlan in port %d during unregister\n", port);
	--table->total;
	if (dup) {
		dup_table->is_dup[index] = false;
		if (dup_table->refs[index])
			/* the other port still uses this entry itself */
			goto out;
		dup_table->entries[index] = 0;
		if (mlx4_set_port_vlan_table(dev, dup_port, dup_table->entries))
			mlx4_warn(dev, "Fail to set vlan in duplicate port %d during unregister\n", dup_port);
		--dup_table->total;
	}
out:
	if (dup) {
		if (port == 2) {
			mutex_unlock(&table->mutex);
			mutex_unlock(&dup_table->mutex);
		} else {
			mutex_unlock(&dup_table->mutex);
			mutex_unlock(&table->mutex);
		}
	} else {
		mutex_unlock(&table->mutex);
	}
}
  718. void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
  719. {
  720. u64 out_param = 0;
  721. if (mlx4_is_mfunc(dev)) {
  722. (void) mlx4_cmd_imm(dev, vlan, &out_param,
  723. ((u32) port) << 8 | (u32) RES_VLAN,
  724. RES_OP_RESERVE_AND_MAP,
  725. MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
  726. MLX4_CMD_WRAPPED);
  727. return;
  728. }
  729. __mlx4_unregister_vlan(dev, port, vlan);
  730. }
  731. EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
  732. int mlx4_bond_mac_table(struct mlx4_dev *dev)
  733. {
  734. struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
  735. struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
  736. int ret = 0;
  737. int i;
  738. bool update1 = false;
  739. bool update2 = false;
  740. mutex_lock(&t1->mutex);
  741. mutex_lock(&t2->mutex);
  742. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  743. if ((t1->entries[i] != t2->entries[i]) &&
  744. t1->entries[i] && t2->entries[i]) {
  745. mlx4_warn(dev, "can't duplicate entry %d in mac table\n", i);
  746. ret = -EINVAL;
  747. goto unlock;
  748. }
  749. }
  750. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  751. if (t1->entries[i] && !t2->entries[i]) {
  752. t2->entries[i] = t1->entries[i];
  753. t2->is_dup[i] = true;
  754. update2 = true;
  755. } else if (!t1->entries[i] && t2->entries[i]) {
  756. t1->entries[i] = t2->entries[i];
  757. t1->is_dup[i] = true;
  758. update1 = true;
  759. } else if (t1->entries[i] && t2->entries[i]) {
  760. t1->is_dup[i] = true;
  761. t2->is_dup[i] = true;
  762. }
  763. }
  764. if (update1) {
  765. ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
  766. if (ret)
  767. mlx4_warn(dev, "failed to set MAC table for port 1 (%d)\n", ret);
  768. }
  769. if (!ret && update2) {
  770. ret = mlx4_set_port_mac_table(dev, 2, t2->entries);
  771. if (ret)
  772. mlx4_warn(dev, "failed to set MAC table for port 2 (%d)\n", ret);
  773. }
  774. if (ret)
  775. mlx4_warn(dev, "failed to create mirror MAC tables\n");
  776. unlock:
  777. mutex_unlock(&t2->mutex);
  778. mutex_unlock(&t1->mutex);
  779. return ret;
  780. }
  781. int mlx4_unbond_mac_table(struct mlx4_dev *dev)
  782. {
  783. struct mlx4_mac_table *t1 = &mlx4_priv(dev)->port[1].mac_table;
  784. struct mlx4_mac_table *t2 = &mlx4_priv(dev)->port[2].mac_table;
  785. int ret = 0;
  786. int ret1;
  787. int i;
  788. bool update1 = false;
  789. bool update2 = false;
  790. mutex_lock(&t1->mutex);
  791. mutex_lock(&t2->mutex);
  792. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  793. if (t1->entries[i] != t2->entries[i]) {
  794. mlx4_warn(dev, "mac table is in an unexpected state when trying to unbond\n");
  795. ret = -EINVAL;
  796. goto unlock;
  797. }
  798. }
  799. for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
  800. if (!t1->entries[i])
  801. continue;
  802. t1->is_dup[i] = false;
  803. if (!t1->refs[i]) {
  804. t1->entries[i] = 0;
  805. update1 = true;
  806. }
  807. t2->is_dup[i] = false;
  808. if (!t2->refs[i]) {
  809. t2->entries[i] = 0;
  810. update2 = true;
  811. }
  812. }
  813. if (update1) {
  814. ret = mlx4_set_port_mac_table(dev, 1, t1->entries);
  815. if (ret)
  816. mlx4_warn(dev, "failed to unmirror MAC tables for port 1(%d)\n", ret);
  817. }
  818. if (update2) {
  819. ret1 = mlx4_set_port_mac_table(dev, 2, t2->entries);
  820. if (ret1) {
  821. mlx4_warn(dev, "failed to unmirror MAC tables for port 2(%d)\n", ret1);
  822. ret = ret1;
  823. }
  824. }
  825. unlock:
  826. mutex_unlock(&t2->mutex);
  827. mutex_unlock(&t1->mutex);
  828. return ret;
  829. }
  830. int mlx4_bond_vlan_table(struct mlx4_dev *dev)
  831. {
  832. struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
  833. struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
  834. int ret = 0;
  835. int i;
  836. bool update1 = false;
  837. bool update2 = false;
  838. mutex_lock(&t1->mutex);
  839. mutex_lock(&t2->mutex);
  840. for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
  841. if ((t1->entries[i] != t2->entries[i]) &&
  842. t1->entries[i] && t2->entries[i]) {
  843. mlx4_warn(dev, "can't duplicate entry %d in vlan table\n", i);
  844. ret = -EINVAL;
  845. goto unlock;
  846. }
  847. }
  848. for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
  849. if (t1->entries[i] && !t2->entries[i]) {
  850. t2->entries[i] = t1->entries[i];
  851. t2->is_dup[i] = true;
  852. update2 = true;
  853. } else if (!t1->entries[i] && t2->entries[i]) {
  854. t1->entries[i] = t2->entries[i];
  855. t1->is_dup[i] = true;
  856. update1 = true;
  857. } else if (t1->entries[i] && t2->entries[i]) {
  858. t1->is_dup[i] = true;
  859. t2->is_dup[i] = true;
  860. }
  861. }
  862. if (update1) {
  863. ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
  864. if (ret)
  865. mlx4_warn(dev, "failed to set VLAN table for port 1 (%d)\n", ret);
  866. }
  867. if (!ret && update2) {
  868. ret = mlx4_set_port_vlan_table(dev, 2, t2->entries);
  869. if (ret)
  870. mlx4_warn(dev, "failed to set VLAN table for port 2 (%d)\n", ret);
  871. }
  872. if (ret)
  873. mlx4_warn(dev, "failed to create mirror VLAN tables\n");
  874. unlock:
  875. mutex_unlock(&t2->mutex);
  876. mutex_unlock(&t1->mutex);
  877. return ret;
  878. }
  879. int mlx4_unbond_vlan_table(struct mlx4_dev *dev)
  880. {
  881. struct mlx4_vlan_table *t1 = &mlx4_priv(dev)->port[1].vlan_table;
  882. struct mlx4_vlan_table *t2 = &mlx4_priv(dev)->port[2].vlan_table;
  883. int ret = 0;
  884. int ret1;
  885. int i;
  886. bool update1 = false;
  887. bool update2 = false;
  888. mutex_lock(&t1->mutex);
  889. mutex_lock(&t2->mutex);
  890. for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
  891. if (t1->entries[i] != t2->entries[i]) {
  892. mlx4_warn(dev, "vlan table is in an unexpected state when trying to unbond\n");
  893. ret = -EINVAL;
  894. goto unlock;
  895. }
  896. }
  897. for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
  898. if (!t1->entries[i])
  899. continue;
  900. t1->is_dup[i] = false;
  901. if (!t1->refs[i]) {
  902. t1->entries[i] = 0;
  903. update1 = true;
  904. }
  905. t2->is_dup[i] = false;
  906. if (!t2->refs[i]) {
  907. t2->entries[i] = 0;
  908. update2 = true;
  909. }
  910. }
  911. if (update1) {
  912. ret = mlx4_set_port_vlan_table(dev, 1, t1->entries);
  913. if (ret)
  914. mlx4_warn(dev, "failed to unmirror VLAN tables for port 1(%d)\n", ret);
  915. }
  916. if (update2) {
  917. ret1 = mlx4_set_port_vlan_table(dev, 2, t2->entries);
  918. if (ret1) {
  919. mlx4_warn(dev, "failed to unmirror VLAN tables for port 2(%d)\n", ret1);
  920. ret = ret1;
  921. }
  922. }
  923. unlock:
  924. mutex_unlock(&t2->mutex);
  925. mutex_unlock(&t1->mutex);
  926. return ret;
  927. }
/* Query the IB capability mask of @port through a MAD_IFC command and
 * store the (big-endian) mask in *@caps on success.  Returns 0 or a
 * negative error from mailbox allocation / the FW command.
 */
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	u8 *inbuf, *outbuf;
	int err;

	inmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);

	outmailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	inbuf = inmailbox->buf;
	outbuf = outmailbox->buf;
	/* First four bytes of the MAD header are all set to 1.
	 * NOTE(review): presumably base version / mgmt class / class version
	 * / method(GET) per the standard MAD layout -- confirm vs. IBA spec.
	 */
	inbuf[0] = 1;
	inbuf[1] = 1;
	inbuf[2] = 1;
	inbuf[3] = 1;
	/* Attribute ID 0x0015; attribute modifier carries the port number */
	*(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);

	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (!err)
		/* Capability mask is read from byte offset 84 of the reply */
		*caps = *(__be32 *) (outbuf + 84);

	mlx4_free_cmd_mailbox(dev, inmailbox);
	mlx4_free_cmd_mailbox(dev, outmailbox);
	return err;
}
/* All-zero GID entry, used to detect and to clear empty GID table slots */
static struct mlx4_roce_gid_entry zgid_entry;
/* Return the number of RoCE GID table entries owned by @slave on @port.
 * The PF (slave 0) always gets MLX4_ROCE_PF_GIDS; the remaining entries
 * are divided among the VFs active on the port, with the first
 * (remainder) VFs receiving one extra entry each.
 */
int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
{
	int vfs;
	int slave_gid = slave;
	unsigned i;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return MLX4_ROCE_PF_GIDS;

	/* Slave is a VF */
	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Turn the global slave number into a per-port ordinal by
	 * subtracting slaves that live exclusively on other ports.
	 */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
			dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	/* The first (gids % vfs) VFs receive one extra GID entry */
	if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
		return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
	return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
}
/* Return the first RoCE GID table index assigned to @slave on @port.
 * The PF owns indices [0, MLX4_ROCE_PF_GIDS); VF blocks follow, sized as
 * in mlx4_get_slave_num_gids() (first (gids % vfs) VFs get an extra slot).
 */
int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
{
	int gids;
	unsigned i;
	int slave_gid = slave;
	int vfs;
	struct mlx4_slaves_pport slaves_pport;
	struct mlx4_active_ports actv_ports;
	unsigned max_port_p_one;

	if (slave == 0)
		return 0;

	slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
	actv_ports = mlx4_get_active_ports(dev, slave);
	max_port_p_one = find_first_bit(actv_ports.ports, dev->caps.num_ports) +
		bitmap_weight(actv_ports.ports, dev->caps.num_ports) + 1;

	/* Turn the global slave number into a per-port ordinal by
	 * subtracting slaves that live exclusively on other ports.
	 */
	for (i = 1; i < max_port_p_one; i++) {
		struct mlx4_active_ports exclusive_ports;
		struct mlx4_slaves_pport slaves_pport_actv;
		bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
		set_bit(i - 1, exclusive_ports.ports);
		if (i == port)
			continue;
		slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
			dev, &exclusive_ports);
		slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
					   dev->persist->num_vfs + 1);
	}
	gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
	vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
	/* VFs in the "one extra GID" group have larger strides */
	if (slave_gid <= gids % vfs)
		return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
	return MLX4_ROCE_PF_GIDS + (gids % vfs) +
		((gids / vfs) * (slave_gid - 1));
}
EXPORT_SYMBOL_GPL(mlx4_get_base_gid_ix);
/* Zero out the RoCE GID table slots owned by @slave on @port and push the
 * whole per-port table to FW via SET_PORT.  @mailbox is caller-owned and
 * its buffer is overwritten here.  Runs under the port's GID-table mutex.
 */
static int mlx4_reset_roce_port_gids(struct mlx4_dev *dev, int slave,
				     int port, struct mlx4_cmd_mailbox *mailbox)
{
	struct mlx4_roce_gid_entry *gid_entry_mbox;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_gids, base, offset;
	int i, err;

	num_gids = mlx4_get_slave_num_gids(dev, slave, port);
	base = mlx4_get_base_gid_ix(dev, slave, port);

	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);

	mutex_lock(&(priv->port[port].gid_table.mutex));
	/* Zero-out gids belonging to that slave in the port GID table */
	for (i = 0, offset = base; i < num_gids; offset++, i++)
		memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
		       zgid_entry.raw, MLX4_ROCE_GID_ENTRY_SIZE);

	/* Now, copy roce port gids table to mailbox for passing to FW */
	gid_entry_mbox = (struct mlx4_roce_gid_entry *)mailbox->buf;
	for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
		memcpy(gid_entry_mbox->raw,
		       priv->port[port].gid_table.roce_gids[i].raw,
		       MLX4_ROCE_GID_ENTRY_SIZE);

	/* in_modifier: low byte = port, next byte = GID_TABLE sub-opcode */
	err = mlx4_cmd(dev, mailbox->dma,
		       ((u32)port) | (MLX4_SET_PORT_GID_TABLE << 8),
		       MLX4_SET_PORT_ETH_OPCODE, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	mutex_unlock(&(priv->port[port].gid_table.mutex));
	return err;
}
  1054. void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
  1055. {
  1056. struct mlx4_active_ports actv_ports;
  1057. struct mlx4_cmd_mailbox *mailbox;
  1058. int num_eth_ports, err;
  1059. int i;
  1060. if (slave < 0 || slave > dev->persist->num_vfs)
  1061. return;
  1062. actv_ports = mlx4_get_active_ports(dev, slave);
  1063. for (i = 0, num_eth_ports = 0; i < dev->caps.num_ports; i++) {
  1064. if (test_bit(i, actv_ports.ports)) {
  1065. if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
  1066. continue;
  1067. num_eth_ports++;
  1068. }
  1069. }
  1070. if (!num_eth_ports)
  1071. return;
  1072. /* have ETH ports. Alloc mailbox for SET_PORT command */
  1073. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1074. if (IS_ERR(mailbox))
  1075. return;
  1076. for (i = 0; i < dev->caps.num_ports; i++) {
  1077. if (test_bit(i, actv_ports.ports)) {
  1078. if (dev->caps.port_type[i + 1] != MLX4_PORT_TYPE_ETH)
  1079. continue;
  1080. err = mlx4_reset_roce_port_gids(dev, slave, i + 1, mailbox);
  1081. if (err)
  1082. mlx4_warn(dev, "Could not reset ETH port GID table for slave %d, port %d (%d)\n",
  1083. slave, i + 1, err);
  1084. }
  1085. }
  1086. mlx4_free_cmd_mailbox(dev, mailbox);
  1087. return;
  1088. }
/* Master-side handler for a SET_PORT command issued on behalf of @slave.
 * @in_mod: low byte is the port number, the upper bytes are the ETH
 *          sub-operation (in_modifier); @op_mod: non-zero selects the
 *          Ethernet flavor of SET_PORT, zero the IB flavor.
 * Validates and rewrites the slave's mailbox contents before forwarding
 * the command to FW.  Returns 0 or a negative error.
 */
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_port_info *port_info;
	struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
	struct mlx4_slave_state *slave_st = &master->slave_state[slave];
	struct mlx4_set_port_rqp_calc_context *qpn_context;
	struct mlx4_set_port_general_context *gen_context;
	struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
	int reset_qkey_viols;
	int port;
	int is_eth;
	int num_gids;
	int base;
	u32 in_modifier;
	u32 promisc;
	u16 mtu, prev_mtu;
	int err;
	int i, j;
	int offset;
	__be32 agg_cap_mask;
	__be32 slave_cap_mask;
	__be32 new_cap_mask;

	port = in_mod & 0xff;
	in_modifier = in_mod >> 8;
	is_eth = op_mod;
	port_info = &priv->port[port];

	/* Slaves cannot perform SET_PORT operations except changing MTU */
	if (is_eth) {
		if (slave != dev->caps.function &&
		    in_modifier != MLX4_SET_PORT_GENERAL &&
		    in_modifier != MLX4_SET_PORT_GID_TABLE) {
			mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
				  slave);
			return -EINVAL;
		}
		switch (in_modifier) {
		case MLX4_SET_PORT_RQP_CALC:
			/* Force the slave's QPN/promiscuity context to use
			 * the master-assigned base QPN for this port.
			 */
			qpn_context = inbox->buf;
			qpn_context->base_qpn =
				cpu_to_be32(port_info->base_qpn);
			qpn_context->n_mac = 0x7;
			promisc = be32_to_cpu(qpn_context->promisc) >>
				SET_PORT_PROMISC_SHIFT;
			qpn_context->promisc = cpu_to_be32(
				promisc << SET_PORT_PROMISC_SHIFT |
				port_info->base_qpn);
			promisc = be32_to_cpu(qpn_context->mcast) >>
				SET_PORT_MC_PROMISC_SHIFT;
			qpn_context->mcast = cpu_to_be32(
				promisc << SET_PORT_MC_PROMISC_SHIFT |
				port_info->base_qpn);
			break;
		case MLX4_SET_PORT_GENERAL:
			gen_context = inbox->buf;
			/* Mtu is configured as the max MTU among all the
			 * the functions on the port. */
			mtu = be16_to_cpu(gen_context->mtu);
			mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
				    ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
			prev_mtu = slave_st->mtu[port];
			slave_st->mtu[port] = mtu;
			if (mtu > master->max_mtu[port])
				master->max_mtu[port] = mtu;
			if (mtu < prev_mtu && prev_mtu ==
			    master->max_mtu[port]) {
				/* This slave held the max; recompute it
				 * across all slaves.
				 */
				slave_st->mtu[port] = mtu;
				master->max_mtu[port] = mtu;
				for (i = 0; i < dev->num_slaves; i++) {
					master->max_mtu[port] =
						max(master->max_mtu[port],
						    master->slave_state[i].mtu[port]);
				}
			}
			gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
			/* Slave cannot change Global Pause configuration */
			if (slave != mlx4_master_func_num(dev) &&
			    ((gen_context->pptx != master->pptx) ||
			     (gen_context->pprx != master->pprx))) {
				gen_context->pptx = master->pptx;
				gen_context->pprx = master->pprx;
				mlx4_warn(dev,
					  "denying Global Pause change for slave:%d\n",
					  slave);
			} else {
				master->pptx = gen_context->pptx;
				master->pprx = gen_context->pprx;
			}
			break;
		case MLX4_SET_PORT_GID_TABLE:
			/* change to MULTIPLE entries: number of guest's gids
			 * need a FOR-loop here over number of gids the guest has.
			 * 1. Check no duplicates in gids passed by slave
			 */
			num_gids = mlx4_get_slave_num_gids(dev, slave, port);
			base = mlx4_get_base_gid_ix(dev, slave, port);
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < num_gids; gid_entry_mbox++, i++) {
				if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
					    sizeof(zgid_entry)))
					continue;
				gid_entry_mb1 = gid_entry_mbox + 1;
				for (j = i + 1; j < num_gids; gid_entry_mb1++, j++) {
					if (!memcmp(gid_entry_mb1->raw,
						    zgid_entry.raw, sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mb1->raw, gid_entry_mbox->raw,
						    sizeof(gid_entry_mbox->raw))) {
						/* found duplicate */
						return -EINVAL;
					}
				}
			}
			/* 2. Check that do not have duplicates in OTHER
			 *    entries in the port GID table
			 */
			mutex_lock(&(priv->port[port].gid_table.mutex));
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
				if (i >= base && i < base + num_gids)
					continue; /* don't compare to slave's current gids */
				gid_entry_tbl = &priv->port[port].gid_table.roce_gids[i];
				if (!memcmp(gid_entry_tbl->raw, zgid_entry.raw, sizeof(zgid_entry)))
					continue;
				gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
				for (j = 0; j < num_gids; gid_entry_mbox++, j++) {
					if (!memcmp(gid_entry_mbox->raw, zgid_entry.raw,
						    sizeof(zgid_entry)))
						continue;
					if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
						    sizeof(gid_entry_tbl->raw))) {
						/* found duplicate */
						mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
							  slave, i);
						mutex_unlock(&(priv->port[port].gid_table.mutex));
						return -EINVAL;
					}
				}
			}
			/* insert slave GIDs with memcpy, starting at slave's base index */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0, offset = base; i < num_gids; gid_entry_mbox++, offset++, i++)
				memcpy(priv->port[port].gid_table.roce_gids[offset].raw,
				       gid_entry_mbox->raw, MLX4_ROCE_GID_ENTRY_SIZE);
			/* Now, copy roce port gids table to current mailbox for passing to FW */
			gid_entry_mbox = (struct mlx4_roce_gid_entry *)(inbox->buf);
			for (i = 0; i < MLX4_ROCE_MAX_GIDS; gid_entry_mbox++, i++)
				memcpy(gid_entry_mbox->raw,
				       priv->port[port].gid_table.roce_gids[i].raw,
				       MLX4_ROCE_GID_ENTRY_SIZE);
			err = mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				       MLX4_CMD_NATIVE);
			mutex_unlock(&(priv->port[port].gid_table.mutex));
			return err;
		}
		return mlx4_cmd(dev, inbox->dma, in_mod & 0xffff, op_mod,
				MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_NATIVE);
	}

	/* Slaves are not allowed to SET_PORT beacon (LED) blink */
	if (op_mod == MLX4_SET_PORT_BEACON_OPCODE) {
		mlx4_warn(dev, "denying SET_PORT Beacon slave:%d\n", slave);
		return -EPERM;
	}

	/* For IB, we only consider:
	 * - The capability mask, which is set to the aggregate of all
	 *   slave function capabilities
	 * - The QKey violatin counter - reset according to each request.
	 */
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
		new_cap_mask = ((__be32 *) inbox->buf)[2];
	} else {
		reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
		new_cap_mask = ((__be32 *) inbox->buf)[1];
	}

	/* slave may not set the IS_SM capability for the port */
	if (slave != mlx4_master_func_num(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_IS_SM))
		return -EINVAL;

	/* No DEV_MGMT in multifunc mode */
	if (mlx4_is_mfunc(dev) &&
	    (be32_to_cpu(new_cap_mask) & MLX4_PORT_CAP_DEV_MGMT_SUP))
		return -EINVAL;

	/* Record the slave's new mask and aggregate across all slaves */
	agg_cap_mask = 0;
	slave_cap_mask =
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
	priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
	for (i = 0; i < dev->num_slaves; i++)
		agg_cap_mask |=
			priv->mfunc.master.slave_state[i].ib_cap_mask[port];

	/* only clear mailbox for guests. Master may be setting
	 * MTU or PKEY table size
	 */
	if (slave != dev->caps.function)
		memset(inbox->buf, 0, 256);
	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
		((__be32 *) inbox->buf)[2] = agg_cap_mask;
	} else {
		((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
		((__be32 *) inbox->buf)[1] = agg_cap_mask;
	}

	err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
	if (err)
		/* Roll back this slave's cached cap mask on FW failure */
		priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
			slave_cap_mask;
	return err;
}
  1300. int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
  1301. struct mlx4_vhcr *vhcr,
  1302. struct mlx4_cmd_mailbox *inbox,
  1303. struct mlx4_cmd_mailbox *outbox,
  1304. struct mlx4_cmd_info *cmd)
  1305. {
  1306. int port = mlx4_slave_convert_port(
  1307. dev, slave, vhcr->in_modifier & 0xFF);
  1308. if (port < 0)
  1309. return -EINVAL;
  1310. vhcr->in_modifier = (vhcr->in_modifier & ~0xFF) |
  1311. (port & 0xFF);
  1312. return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
  1313. vhcr->op_modifier, inbox);
  1314. }
/* bit locations for set port command with zero op modifier */
enum {
	MLX4_SET_PORT_VL_CAP = 4, /* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP = 12, /* bits 15:12 */
	/* single-bit "change" flags selecting which fields FW applies */
	MLX4_CHANGE_PORT_PKEY_TBL_SZ = 20,
	MLX4_CHANGE_PORT_VL_CAP = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};
  1323. int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port, int pkey_tbl_sz)
  1324. {
  1325. struct mlx4_cmd_mailbox *mailbox;
  1326. int err, vl_cap, pkey_tbl_flag = 0;
  1327. if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
  1328. return 0;
  1329. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1330. if (IS_ERR(mailbox))
  1331. return PTR_ERR(mailbox);
  1332. ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
  1333. if (pkey_tbl_sz >= 0 && mlx4_is_master(dev)) {
  1334. pkey_tbl_flag = 1;
  1335. ((__be16 *) mailbox->buf)[20] = cpu_to_be16(pkey_tbl_sz);
  1336. }
  1337. /* IB VL CAP enum isn't used by the firmware, just numerical values */
  1338. for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
  1339. ((__be32 *) mailbox->buf)[0] = cpu_to_be32(
  1340. (1 << MLX4_CHANGE_PORT_MTU_CAP) |
  1341. (1 << MLX4_CHANGE_PORT_VL_CAP) |
  1342. (pkey_tbl_flag << MLX4_CHANGE_PORT_PKEY_TBL_SZ) |
  1343. (dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
  1344. (vl_cap << MLX4_SET_PORT_VL_CAP));
  1345. err = mlx4_cmd(dev, mailbox->dma, port,
  1346. MLX4_SET_PORT_IB_OPCODE, MLX4_CMD_SET_PORT,
  1347. MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
  1348. if (err != -ENOMEM)
  1349. break;
  1350. }
  1351. mlx4_free_cmd_mailbox(dev, mailbox);
  1352. return err;
  1353. }
  1354. #define SET_PORT_ROCE_2_FLAGS 0x10
  1355. #define MLX4_SET_PORT_ROCE_V1_V2 0x2
  1356. int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
  1357. u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
  1358. {
  1359. struct mlx4_cmd_mailbox *mailbox;
  1360. struct mlx4_set_port_general_context *context;
  1361. int err;
  1362. u32 in_mod;
  1363. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1364. if (IS_ERR(mailbox))
  1365. return PTR_ERR(mailbox);
  1366. context = mailbox->buf;
  1367. context->flags = SET_PORT_GEN_ALL_VALID;
  1368. context->mtu = cpu_to_be16(mtu);
  1369. context->pptx = (pptx * (!pfctx)) << 7;
  1370. context->pfctx = pfctx;
  1371. context->pprx = (pprx * (!pfcrx)) << 7;
  1372. context->pfcrx = pfcrx;
  1373. if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) {
  1374. context->flags |= SET_PORT_ROCE_2_FLAGS;
  1375. context->roce_mode |=
  1376. MLX4_SET_PORT_ROCE_V1_V2 << 4;
  1377. }
  1378. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  1379. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1380. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1381. MLX4_CMD_WRAPPED);
  1382. mlx4_free_cmd_mailbox(dev, mailbox);
  1383. return err;
  1384. }
  1385. EXPORT_SYMBOL(mlx4_SET_PORT_general);
  1386. int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
  1387. u8 promisc)
  1388. {
  1389. struct mlx4_cmd_mailbox *mailbox;
  1390. struct mlx4_set_port_rqp_calc_context *context;
  1391. int err;
  1392. u32 in_mod;
  1393. u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
  1394. MCAST_DIRECT : MCAST_DEFAULT;
  1395. if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
  1396. return 0;
  1397. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1398. if (IS_ERR(mailbox))
  1399. return PTR_ERR(mailbox);
  1400. context = mailbox->buf;
  1401. context->base_qpn = cpu_to_be32(base_qpn);
  1402. context->n_mac = dev->caps.log_num_macs;
  1403. context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
  1404. base_qpn);
  1405. context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
  1406. base_qpn);
  1407. context->intra_no_vlan = 0;
  1408. context->no_vlan = MLX4_NO_VLAN_IDX;
  1409. context->intra_vlan_miss = 0;
  1410. context->vlan_miss = MLX4_VLAN_MISS_IDX;
  1411. in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
  1412. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1413. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1414. MLX4_CMD_WRAPPED);
  1415. mlx4_free_cmd_mailbox(dev, mailbox);
  1416. return err;
  1417. }
  1418. EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
  1419. int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
  1420. {
  1421. struct mlx4_cmd_mailbox *mailbox;
  1422. struct mlx4_set_port_general_context *context;
  1423. u32 in_mod;
  1424. int err;
  1425. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1426. if (IS_ERR(mailbox))
  1427. return PTR_ERR(mailbox);
  1428. context = mailbox->buf;
  1429. context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK;
  1430. if (ignore_fcs_value)
  1431. context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
  1432. else
  1433. context->ignore_fcs &= ~MLX4_IGNORE_FCS_MASK;
  1434. in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
  1435. err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
  1436. MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
  1437. mlx4_free_cmd_mailbox(dev, mailbox);
  1438. return err;
  1439. }
  1440. EXPORT_SYMBOL(mlx4_SET_PORT_fcs_check);
/* SET_PORT_VXLAN flag bits: *_MODIFY bits select which fields the command
 * changes; VXLAN_ENABLE turns the offload on.
 */
enum {
	VXLAN_ENABLE_MODIFY	= 1 << 7,
	VXLAN_STEERING_MODIFY	= 1 << 6,

	VXLAN_ENABLE		= 1 << 7,
};

/* Mailbox payload layout for the SET_PORT_VXLAN sub-command */
struct mlx4_set_port_vxlan_context {
	u32	reserved1;
	u8	modify_flags;
	u8	reserved2;
	u8	enable_flags;
	u8	steering;
};
  1453. int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering, int enable)
  1454. {
  1455. int err;
  1456. u32 in_mod;
  1457. struct mlx4_cmd_mailbox *mailbox;
  1458. struct mlx4_set_port_vxlan_context *context;
  1459. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1460. if (IS_ERR(mailbox))
  1461. return PTR_ERR(mailbox);
  1462. context = mailbox->buf;
  1463. memset(context, 0, sizeof(*context));
  1464. context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
  1465. if (enable)
  1466. context->enable_flags = VXLAN_ENABLE;
  1467. context->steering = steering;
  1468. in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
  1469. err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
  1470. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1471. MLX4_CMD_NATIVE);
  1472. mlx4_free_cmd_mailbox(dev, mailbox);
  1473. return err;
  1474. }
  1475. EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
  1476. int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time)
  1477. {
  1478. int err;
  1479. struct mlx4_cmd_mailbox *mailbox;
  1480. mailbox = mlx4_alloc_cmd_mailbox(dev);
  1481. if (IS_ERR(mailbox))
  1482. return PTR_ERR(mailbox);
  1483. *((__be32 *)mailbox->buf) = cpu_to_be32(time);
  1484. err = mlx4_cmd(dev, mailbox->dma, port, MLX4_SET_PORT_BEACON_OPCODE,
  1485. MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
  1486. MLX4_CMD_NATIVE);
  1487. mlx4_free_cmd_mailbox(dev, mailbox);
  1488. return err;
  1489. }
  1490. EXPORT_SYMBOL(mlx4_SET_PORT_BEACON);
  1491. int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
  1492. struct mlx4_vhcr *vhcr,
  1493. struct mlx4_cmd_mailbox *inbox,
  1494. struct mlx4_cmd_mailbox *outbox,
  1495. struct mlx4_cmd_info *cmd)
  1496. {
  1497. int err = 0;
  1498. return err;
  1499. }
  1500. int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
  1501. u64 mac, u64 clear, u8 mode)
  1502. {
  1503. return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
  1504. MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
  1505. MLX4_CMD_WRAPPED);
  1506. }
  1507. EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
  1508. int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
  1509. struct mlx4_vhcr *vhcr,
  1510. struct mlx4_cmd_mailbox *inbox,
  1511. struct mlx4_cmd_mailbox *outbox,
  1512. struct mlx4_cmd_info *cmd)
  1513. {
  1514. int err = 0;
  1515. return err;
  1516. }
/* DUMP_ETH_STATS issued by a slave is intentionally a no-op: the command
 * is accepted and reported as successful without touching the device.
 */
int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				struct mlx4_cmd_mailbox *outbox,
				struct mlx4_cmd_info *cmd)
{
	return 0;
}
  1525. int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
  1526. int *slave_id)
  1527. {
  1528. struct mlx4_priv *priv = mlx4_priv(dev);
  1529. int i, found_ix = -1;
  1530. int vf_gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
  1531. struct mlx4_slaves_pport slaves_pport;
  1532. unsigned num_vfs;
  1533. int slave_gid;
  1534. if (!mlx4_is_mfunc(dev))
  1535. return -EINVAL;
  1536. slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
  1537. num_vfs = bitmap_weight(slaves_pport.slaves,
  1538. dev->persist->num_vfs + 1) - 1;
  1539. for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
  1540. if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
  1541. MLX4_ROCE_GID_ENTRY_SIZE)) {
  1542. found_ix = i;
  1543. break;
  1544. }
  1545. }
  1546. if (found_ix >= 0) {
  1547. /* Calculate a slave_gid which is the slave number in the gid
  1548. * table and not a globally unique slave number.
  1549. */
  1550. if (found_ix < MLX4_ROCE_PF_GIDS)
  1551. slave_gid = 0;
  1552. else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *
  1553. (vf_gids / num_vfs + 1))
  1554. slave_gid = ((found_ix - MLX4_ROCE_PF_GIDS) /
  1555. (vf_gids / num_vfs + 1)) + 1;
  1556. else
  1557. slave_gid =
  1558. ((found_ix - MLX4_ROCE_PF_GIDS -
  1559. ((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
  1560. (vf_gids / num_vfs)) + vf_gids % num_vfs + 1;
  1561. /* Calculate the globally unique slave id */
  1562. if (slave_gid) {
  1563. struct mlx4_active_ports exclusive_ports;
  1564. struct mlx4_active_ports actv_ports;
  1565. struct mlx4_slaves_pport slaves_pport_actv;
  1566. unsigned max_port_p_one;
  1567. int num_vfs_before = 0;
  1568. int candidate_slave_gid;
  1569. /* Calculate how many VFs are on the previous port, if exists */
  1570. for (i = 1; i < port; i++) {
  1571. bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
  1572. set_bit(i - 1, exclusive_ports.ports);
  1573. slaves_pport_actv =
  1574. mlx4_phys_to_slaves_pport_actv(
  1575. dev, &exclusive_ports);
  1576. num_vfs_before += bitmap_weight(
  1577. slaves_pport_actv.slaves,
  1578. dev->persist->num_vfs + 1);
  1579. }
  1580. /* candidate_slave_gid isn't necessarily the correct slave, but
  1581. * it has the same number of ports and is assigned to the same
  1582. * ports as the real slave we're looking for. On dual port VF,
  1583. * slave_gid = [single port VFs on port <port>] +
  1584. * [offset of the current slave from the first dual port VF] +
  1585. * 1 (for the PF).
  1586. */
  1587. candidate_slave_gid = slave_gid + num_vfs_before;
  1588. actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
  1589. max_port_p_one = find_first_bit(
  1590. actv_ports.ports, dev->caps.num_ports) +
  1591. bitmap_weight(actv_ports.ports,
  1592. dev->caps.num_ports) + 1;
  1593. /* Calculate the real slave number */
  1594. for (i = 1; i < max_port_p_one; i++) {
  1595. if (i == port)
  1596. continue;
  1597. bitmap_zero(exclusive_ports.ports,
  1598. dev->caps.num_ports);
  1599. set_bit(i - 1, exclusive_ports.ports);
  1600. slaves_pport_actv =
  1601. mlx4_phys_to_slaves_pport_actv(
  1602. dev, &exclusive_ports);
  1603. slave_gid += bitmap_weight(
  1604. slaves_pport_actv.slaves,
  1605. dev->persist->num_vfs + 1);
  1606. }
  1607. }
  1608. *slave_id = slave_gid;
  1609. }
  1610. return (found_ix >= 0) ? 0 : -EINVAL;
  1611. }
  1612. EXPORT_SYMBOL(mlx4_get_slave_from_roce_gid);
  1613. int mlx4_get_roce_gid_from_slave(struct mlx4_dev *dev, int port, int slave_id,
  1614. u8 *gid)
  1615. {
  1616. struct mlx4_priv *priv = mlx4_priv(dev);
  1617. if (!mlx4_is_master(dev))
  1618. return -EINVAL;
  1619. memcpy(gid, priv->port[port].gid_table.roce_gids[slave_id].raw,
  1620. MLX4_ROCE_GID_ENTRY_SIZE);
  1621. return 0;
  1622. }
  1623. EXPORT_SYMBOL(mlx4_get_roce_gid_from_slave);
/* Cable Module Info */
#define MODULE_INFO_MAX_READ 48	/* max payload of one cable-info MAD */

#define I2C_ADDR_LOW 0x50	/* I2C address of the low EEPROM page */
#define I2C_ADDR_HIGH 0x51	/* I2C address of the high EEPROM page */
#define I2C_PAGE_SIZE 256	/* bytes per EEPROM page */

/* Module Info Data
 *
 * In-mailbox layout of the cable-info request/response carried in the
 * MAD data area (attribute 0xFF60); multi-byte fields are big-endian.
 */
struct mlx4_cable_info {
	u8 i2c_addr;		/* I2C_ADDR_LOW or I2C_ADDR_HIGH */
	u8 page_num;		/* EEPROM page number (0 in current use) */
	__be16 dev_mem_address;	/* byte offset within the page */
	__be16 reserved1;
	__be16 size;		/* number of bytes to read */
	__be32 reserved2[2];
	u8 data[MODULE_INFO_MAX_READ];	/* returned EEPROM bytes */
};
/* Cable-info error codes returned in bits 15:8 of the response MAD
 * status word (extracted with MAD_STATUS_2_CABLE_ERR).
 */
enum cable_info_err {
	CABLE_INF_INV_PORT = 0x1,
	CABLE_INF_OP_NOSUP = 0x2,
	CABLE_INF_NOT_CONN = 0x3,
	CABLE_INF_NO_EEPRM = 0x4,
	CABLE_INF_PAGE_ERR = 0x5,
	CABLE_INF_INV_ADDR = 0x6,
	CABLE_INF_I2C_ADDR = 0x7,
	CABLE_INF_QSFP_VIO = 0x8,
	CABLE_INF_I2C_BUSY = 0x9,
};
  1650. #define MAD_STATUS_2_CABLE_ERR(mad_status) ((mad_status >> 8) & 0xFF)
  1651. static inline const char *cable_info_mad_err_str(u16 mad_status)
  1652. {
  1653. u8 err = MAD_STATUS_2_CABLE_ERR(mad_status);
  1654. switch (err) {
  1655. case CABLE_INF_INV_PORT:
  1656. return "invalid port selected";
  1657. case CABLE_INF_OP_NOSUP:
  1658. return "operation not supported for this port (the port is of type CX4 or internal)";
  1659. case CABLE_INF_NOT_CONN:
  1660. return "cable is not connected";
  1661. case CABLE_INF_NO_EEPRM:
  1662. return "the connected cable has no EPROM (passive copper cable)";
  1663. case CABLE_INF_PAGE_ERR:
  1664. return "page number is greater than 15";
  1665. case CABLE_INF_INV_ADDR:
  1666. return "invalid device_address or size (that is, size equals 0 or address+size is greater than 256)";
  1667. case CABLE_INF_I2C_ADDR:
  1668. return "invalid I2C slave address";
  1669. case CABLE_INF_QSFP_VIO:
  1670. return "at least one cable violates the QSFP specification and ignores the modsel signal";
  1671. case CABLE_INF_I2C_BUSY:
  1672. return "I2C bus is constantly busy";
  1673. }
  1674. return "Unknown Error";
  1675. }
/**
 * mlx4_get_module_info - Read cable module eeprom data
 * @dev: mlx4_dev.
 * @port: port number.
 * @offset: byte offset in eeprom to start reading data from.
 * @size: num of bytes to read.
 * @data: output buffer to put the requested data into.
 *
 * Reads cable module eeprom data, puts the outcome data into
 * data pointer parameter.
 * Returns num of read bytes on success or a negative error
 * code.
 */
int mlx4_get_module_info(struct mlx4_dev *dev, u8 port,
			 u16 offset, u16 size, u8 *data)
{
	struct mlx4_cmd_mailbox *inbox, *outbox;
	struct mlx4_mad_ifc *inmad, *outmad;
	struct mlx4_cable_info *cable_info;
	u16 i2c_addr;
	int ret;

	/* One MAD carries at most MODULE_INFO_MAX_READ bytes */
	if (size > MODULE_INFO_MAX_READ)
		size = MODULE_INFO_MAX_READ;

	inbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(inbox))
		return PTR_ERR(inbox);

	outbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(outbox)) {
		mlx4_free_cmd_mailbox(dev, inbox);
		return PTR_ERR(outbox);
	}

	inmad = (struct mlx4_mad_ifc *)(inbox->buf);
	outmad = (struct mlx4_mad_ifc *)(outbox->buf);

	/* Build a Get MAD for the vendor cable-info attribute */
	inmad->method = 0x1; /* Get */
	inmad->class_version = 0x1;
	inmad->mgmt_class = 0x1;
	inmad->base_version = 0x1;
	inmad->attr_id = cpu_to_be16(0xFF60); /* Module Info */

	if (offset < I2C_PAGE_SIZE && offset + size > I2C_PAGE_SIZE)
		/* Cross pages reads are not allowed
		 * read until offset 256 in low page
		 */
		size -= offset + size - I2C_PAGE_SIZE;

	i2c_addr = I2C_ADDR_LOW;
	if (offset >= I2C_PAGE_SIZE) {
		/* Reset offset to high page */
		i2c_addr = I2C_ADDR_HIGH;
		offset -= I2C_PAGE_SIZE;
	}

	/* Fill the request parameters in the MAD data area */
	cable_info = (struct mlx4_cable_info *)inmad->data;
	cable_info->dev_mem_address = cpu_to_be16(offset);
	cable_info->page_num = 0;
	cable_info->i2c_addr = i2c_addr;
	cable_info->size = cpu_to_be16(size);

	ret = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   MLX4_CMD_NATIVE);
	if (ret)
		goto out;

	if (be16_to_cpu(outmad->status)) {
		/* Mad returned with bad status */
		ret = be16_to_cpu(outmad->status);
		mlx4_warn(dev,
			  "MLX4_CMD_MAD_IFC Get Module info attr(%x) port(%d) i2c_addr(%x) offset(%d) size(%d): Response Mad Status(%x) - %s\n",
			  0xFF60, port, i2c_addr, offset, size,
			  ret, cable_info_mad_err_str(ret));

		if (i2c_addr == I2C_ADDR_HIGH &&
		    MAD_STATUS_2_CABLE_ERR(ret) == CABLE_INF_I2C_ADDR)
			/* Some SFP cables do not support i2c slave
			 * address 0x51 (high page), abort silently.
			 */
			ret = 0;
		else
			ret = -ret;
		goto out;
	}
	/* Success: copy the returned EEPROM bytes to the caller's buffer */
	cable_info = (struct mlx4_cable_info *)outmad->data;
	memcpy(data, cable_info->data, size);
	ret = size;
out:
	mlx4_free_cmd_mailbox(dev, inbox);
	mlx4_free_cmd_mailbox(dev, outbox);
	return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);
  1761. int mlx4_max_tc(struct mlx4_dev *dev)
  1762. {
  1763. u8 num_tc = dev->caps.max_tc_eth;
  1764. if (!num_tc)
  1765. num_tc = MLX4_TC_MAX_NUMBER;
  1766. return num_tc;
  1767. }
  1768. EXPORT_SYMBOL(mlx4_max_tc);