// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;

struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	bool reload_fail;
	bool fw_flash_in_progress;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
{
	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	return 0;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}

unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/
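
/* An EMAD is an Ethernet frame carrying a register access request to the
 * device's firmware: an Ethernet header, followed by an operation TLV, a
 * register TLV holding the register payload, and a terminating end TLV.
 * The items below describe the individual fields of these headers.
 */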

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
	skb_reset_mac_header(skb);

	return 0;
}
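
/* Since skb_push() prepends data, the frame is built back-to-front: end
 * TLV first, then the register TLV and the operation TLV, and finally
 * the Ethernet header, so the finished frame is laid out front-to-back.
 */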
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}
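
/* A struct mlxsw_reg_trans tracks one in-flight EMAD transaction: the
 * skb to (re)transmit, its transaction ID, a delayed work acting as a
 * response timeout, and a completion that is signalled once a response
 * arrives or the retry budget is exhausted.
 */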
struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS	3000
#define MLXSW_EMAD_TIMEOUT_MS			200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	if (trans->core->fw_flash_in_progress)
		timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout);
}

static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
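
/* The timeout work and the RX path can race on the same transaction.
 * The 'active' flag is set to 1 on each transmission; both the timeout
 * handler below and the response handler do atomic_dec_and_test() on
 * it, so exactly one of them acts on the transaction per attempt.
 */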
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		return err;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}
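
/* Allocate an skb large enough for the Ethernet header, the three TLVs
 * and the bus-specific TX header, and reserve the full length up front
 * so the frame can then be built entirely with skb_push() calls.
 */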
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

/*****************
 * Core functions
 *****************/

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
					      extack);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
						extack);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
		return -EOPNOTSUPP;

	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					     mlxsw_core->bus,
					     mlxsw_core->bus_priv, true,
					     devlink);
	mlxsw_core->reload_fail = !!err;

	return err;
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.reload = mlxsw_devlink_core_bus_device_reload,
	.port_type_set = mlxsw_devlink_port_type_set,
	.port_split = mlxsw_devlink_port_split,
	.port_unsplit = mlxsw_devlink_port_unsplit,
	.sb_pool_get = mlxsw_devlink_sb_pool_get,
	.sb_pool_set = mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set = mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
};
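
/* On devlink reload the existing devlink instance (and with it the
 * mlxsw_core private area) is reused: only the bus, ports, EMAD and
 * driver state are torn down and re-initialized, while devlink
 * allocation, registration and resource registration are skipped.
 */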
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct mlxsw_res *res;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = devlink_register(devlink, mlxsw_bus_info->dev);
		if (err)
			goto err_devlink_register;
	}

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	return 0;

err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
	if (!reload)
		devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->reload_fail) {
		if (!reload)
			/* Only the parts that were not de-initialized in the
			 * failed reload attempt need to be de-initialized.
			 */
			goto reload_fail_deinit;
		else
			return;
	}

	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
	if (!reload)
		devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);

	return;

reload_fail_deinit:
	devlink_unregister(devlink);
	devlink_resources_unregister(devlink, NULL);
	devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;
	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}
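
/* Registering a trap couples two steps: installing the software
 * listener and programming the HPKT register so the device actually
 * traps matching packets to the CPU. Unregistering reverses both,
 * first restoring the listener's unreg_action in hardware.
 */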
int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);

void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);

static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree(trans);
		return err;
	}
	return 0;
}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}
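
/* Callers issue one or more asynchronous transactions via
 * mlxsw_reg_trans_query()/mlxsw_reg_trans_write() onto a caller-owned
 * bulk_list and then block here until all of them complete; the first
 * error observed, if any, is returned.
 */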
int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
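
/* The command-interface fallback reuses the EMAD op/reg TLV layout:
 * the TLVs are packed into the input mailbox instead of an Ethernet
 * frame, and the response status is parsed from the output mailbox.
 */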
  1239. static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
  1240. const struct mlxsw_reg_info *reg,
  1241. char *payload,
  1242. enum mlxsw_core_reg_access_type type)
  1243. {
  1244. enum mlxsw_emad_op_tlv_status status;
  1245. int err, n_retry;
  1246. bool reset_ok;
  1247. char *in_mbox, *out_mbox, *tmp;
  1248. dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
  1249. reg->id, mlxsw_reg_id_str(reg->id),
  1250. mlxsw_core_reg_access_type_str(type));
  1251. in_mbox = mlxsw_cmd_mbox_alloc();
  1252. if (!in_mbox)
  1253. return -ENOMEM;
  1254. out_mbox = mlxsw_cmd_mbox_alloc();
  1255. if (!out_mbox) {
  1256. err = -ENOMEM;
  1257. goto free_in_mbox;
  1258. }
  1259. mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
  1260. mlxsw_core_tid_get(mlxsw_core));
  1261. tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
  1262. mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
  1263. /* There is a special treatment needed for MRSR (reset) register.
  1264. * The command interface will return error after the command
  1265. * is executed, so tell the lower layer to expect it
  1266. * and cope accordingly.
  1267. */
  1268. reset_ok = reg->id == MLXSW_REG_MRSR_ID;
  1269. n_retry = 0;
  1270. retry:
  1271. err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
  1272. if (!err) {
  1273. err = mlxsw_emad_process_status(out_mbox, &status);
  1274. if (err) {
  1275. if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
  1276. goto retry;
  1277. dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
  1278. status, mlxsw_emad_op_tlv_status_str(status));
  1279. }
  1280. }
  1281. if (!err)
  1282. memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
  1283. reg->len);
  1284. mlxsw_cmd_mbox_free(out_mbox);
  1285. free_in_mbox:
  1286. mlxsw_cmd_mbox_free(in_mbox);
  1287. if (err)
  1288. dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
  1289. reg->id, mlxsw_reg_id_str(reg->id),
  1290. mlxsw_core_reg_access_type_str(type));
  1291. return err;
  1292. }
  1293. static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
  1294. char *payload, size_t payload_len,
  1295. unsigned long cb_priv)
  1296. {
  1297. char *orig_payload = (char *) cb_priv;
  1298. memcpy(orig_payload, payload, payload_len);
  1299. }
  1300. static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
  1301. const struct mlxsw_reg_info *reg,
  1302. char *payload,
  1303. enum mlxsw_core_reg_access_type type)
  1304. {
  1305. LIST_HEAD(bulk_list);
  1306. int err;
  1307. /* During initialization EMAD interface is not available to us,
  1308. * so we default to command interface. We switch to EMAD interface
  1309. * after setting the appropriate traps.
  1310. */
  1311. if (!mlxsw_core->emad.use_emad)
  1312. return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
  1313. payload, type);
  1314. err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
  1315. payload, type, &bulk_list,
  1316. mlxsw_core_reg_access_cb,
  1317. (unsigned long) payload);
  1318. if (err)
  1319. return err;
  1320. return mlxsw_reg_trans_bulk_wait(&bulk_list);
  1321. }
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
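
/* RX demultiplexer: resolves the ingress local port (translating a LAG
 * origin to a member port first), validates trap ID and port bounds, and
 * hands the skb to the first matching registered RX listener; unmatched
 * or out-of-range packets are dropped.
 */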
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
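
/* The LAG mapping is kept as a flat array indexed by
 * lag_id * MAX_LAG_MEMBERS + port_index, storing the local port occupying
 * each LAG member slot.
 */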
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
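
/* Accessors for device resources (capabilities and limits) queried from
 * the firmware during initialization.
 */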
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);
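
/* Per-port devlink registration. On registration failure the core port
 * structure is cleared back to its initial zeroed state.
 */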
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
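
/* Port type setters: attach the driver's private data to the core port and
 * publish the port to devlink as Ethernet (with its netdev and physical
 * port attributes), InfiniBand, or typeless while the type is not yet
 * known.
 */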
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     u32 port_number, bool split,
			     u32 split_port_subnumber)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
			       port_number, split, split_port_subnumber);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);

enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);

int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
				       u8 local_port, char *name, size_t len)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port_get_phys_port_name(devlink_port, name, len);
}
EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name);
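
/* Debug dump of a command mailbox as big-endian 32-bit words, four per
 * line prefixed with the byte offset, trimmed after the last non-zero
 * word (at least one line is always printed).
 */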
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i ? i : 1;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
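
/* Execute a command through the underlying bus. Mailbox sizes must be
 * 32-bit aligned. When reset_ok is set, a RUNNING_RESET status is treated
 * as success (see the MRSR handling above); other -EIO and timeout
 * outcomes are logged with the decoded opcode and status.
 */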
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}

	if (reset_ok && err == -EIO &&
	    status == MLXSW_CMD_STATUS_RUNNING_RESET) {
		err = 0;
	} else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
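
/* Work scheduling helpers: mlxsw_wq is the general-purpose workqueue,
 * mlxsw_owq the ordered one used where strict execution ordering is
 * required.
 */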
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
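
/* Delegate KVD (key-value database) area sizing to the driver, which
 * splits the memory between the hash-single, hash-double and linear
 * parts based on the config profile.
 */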
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_config_profile *profile,
			     u64 *p_single_size, u64 *p_double_size,
			     u64 *p_linear_size)
{
	struct mlxsw_driver *driver = mlxsw_core->driver;

	if (!driver->kvd_sizes_get)
		return -EINVAL;

	return driver->kvd_sizes_get(mlxsw_core, profile,
				     p_single_size, p_double_size,
				     p_linear_size);
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
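
/* Flags firmware flashing to the lower layers, which may use it to extend
 * command timeouts while the flash is in progress.
 */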
void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core->fw_flash_in_progress = true;
}
EXPORT_SYMBOL(mlxsw_core_fw_flash_start);

void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core->fw_flash_in_progress = false;
}
EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
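
/* Module init allocates the two driver-wide workqueues; the ordered
 * workqueue name is built from the driver name ("%s_ordered").
 */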
static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");