rxe_net.c

/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
#include <rdma/ib_addr.h>

#include "rxe.h"
#include "rxe_net.h"
#include "rxe_loc.h"

static LIST_HEAD(rxe_dev_list);
static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */
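
/* Map a network device to the rxe device registered on it, if any */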
struct rxe_dev *net_to_rxe(struct net_device *ndev)
{
        struct rxe_dev *rxe;
        struct rxe_dev *found = NULL;

        spin_lock_bh(&dev_list_lock);
        list_for_each_entry(rxe, &rxe_dev_list, list) {
                if (rxe->ndev == ndev) {
                        found = rxe;
                        break;
                }
        }
        spin_unlock_bh(&dev_list_lock);

        return found;
}
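
/* Look up an rxe device by its IB device name */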
struct rxe_dev *get_rxe_by_name(const char *name)
{
        struct rxe_dev *rxe;
        struct rxe_dev *found = NULL;

        spin_lock_bh(&dev_list_lock);
        list_for_each_entry(rxe, &rxe_dev_list, list) {
                if (!strcmp(name, rxe->ib_dev.name)) {
                        found = rxe;
                        break;
                }
        }
        spin_unlock_bh(&dev_list_lock);
        return found;
}

static struct rxe_recv_sockets recv_sockets;
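
/* Return the DMA device of the underlying Ethernet device, resolving
 * a VLAN device to its real device first.
 */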
struct device *rxe_dma_device(struct rxe_dev *rxe)
{
        struct net_device *ndev;

        ndev = rxe->ndev;

        if (is_vlan_dev(ndev))
                ndev = vlan_dev_real_dev(ndev);

        return ndev->dev.parent;
}
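
/* Map an MGID to its Ethernet multicast address and add it to (or,
 * below, remove it from) the netdev's multicast filter.
 */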
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
        int err;
        unsigned char ll_addr[ETH_ALEN];

        ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
        err = dev_mc_add(rxe->ndev, ll_addr);

        return err;
}

int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
        int err;
        unsigned char ll_addr[ETH_ALEN];

        ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
        err = dev_mc_del(rxe->ndev, ll_addr);

        return err;
}
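
/* Resolve an IPv4 (or, below, IPv6) route for an outgoing RoCEv2 UDP
 * packet through the given interface.
 */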
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
                                         struct in_addr *saddr,
                                         struct in_addr *daddr)
{
        struct rtable *rt;
        struct flowi4 fl = { { 0 } };

        memset(&fl, 0, sizeof(fl));
        fl.flowi4_oif = ndev->ifindex;
        memcpy(&fl.saddr, saddr, sizeof(*saddr));
        memcpy(&fl.daddr, daddr, sizeof(*daddr));
        fl.flowi4_proto = IPPROTO_UDP;

        rt = ip_route_output_key(&init_net, &fl);
        if (IS_ERR(rt)) {
                pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
                return NULL;
        }

        return &rt->dst;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
                                         struct in6_addr *saddr,
                                         struct in6_addr *daddr)
{
        struct dst_entry *ndst;
        struct flowi6 fl6 = { { 0 } };

        memset(&fl6, 0, sizeof(fl6));
        fl6.flowi6_oif = ndev->ifindex;
        memcpy(&fl6.saddr, saddr, sizeof(*saddr));
        memcpy(&fl6.daddr, daddr, sizeof(*daddr));
        fl6.flowi6_proto = IPPROTO_UDP;

        if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
                                                recv_sockets.sk6->sk, &ndst,
                                                &fl6))) {
                pr_err_ratelimited("no route to %pI6\n", daddr);
                goto put;
        }

        if (unlikely(ndst->error)) {
                pr_err("no route to %pI6\n", daddr);
                goto put;
        }

        return ndst;
put:
        dst_release(ndst);
        return NULL;
}

#else

static struct dst_entry *rxe_find_route6(struct net_device *ndev,
                                         struct in6_addr *saddr,
                                         struct in6_addr *daddr)
{
        return NULL;
}

#endif
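
/* Find a route for the destination in the AV. For RC QPs the route is
 * cached on the QP's socket and revalidated with dst_check() before
 * it is reused.
 */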
static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
                                        struct rxe_qp *qp,
                                        struct rxe_av *av)
{
        const struct ib_gid_attr *attr;
        struct dst_entry *dst = NULL;
        struct net_device *ndev;

        attr = rdma_get_gid_attr(&rxe->ib_dev, qp->attr.port_num,
                                 av->grh.sgid_index);
        if (IS_ERR(attr))
                return NULL;
        ndev = attr->ndev;

        if (qp_type(qp) == IB_QPT_RC)
                dst = sk_dst_get(qp->sk->sk);

        if (!dst || !dst_check(dst, qp->dst_cookie)) {
                if (dst)
                        dst_release(dst);

                if (av->network_type == RDMA_NETWORK_IPV4) {
                        struct in_addr *saddr;
                        struct in_addr *daddr;

                        saddr = &av->sgid_addr._sockaddr_in.sin_addr;
                        daddr = &av->dgid_addr._sockaddr_in.sin_addr;
                        dst = rxe_find_route4(ndev, saddr, daddr);
                } else if (av->network_type == RDMA_NETWORK_IPV6) {
                        struct in6_addr *saddr6;
                        struct in6_addr *daddr6;

                        saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
                        daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
                        dst = rxe_find_route6(ndev, saddr6, daddr6);
#if IS_ENABLED(CONFIG_IPV6)
                        if (dst)
                                qp->dst_cookie =
                                        rt6_get_cookie((struct rt6_info *)dst);
#endif
                }

                if (dst && (qp_type(qp) == IB_QPT_RC)) {
                        dst_hold(dst);
                        sk_dst_set(qp->sk->sk, dst);
                }
        }
        rdma_put_gid_attr(attr);
        return dst;
}
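
/* UDP tunnel encap_rcv callback: hand a received RoCEv2 packet, with
 * pkt->hdr pointing at the BTH just past the UDP header, to rxe_rcv().
 */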
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
        struct udphdr *udph;
        struct net_device *ndev = skb->dev;
        struct net_device *rdev = ndev;
        struct rxe_dev *rxe = net_to_rxe(ndev);
        struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

        if (!rxe && is_vlan_dev(rdev)) {
                rdev = vlan_dev_real_dev(ndev);
                rxe = net_to_rxe(rdev);
        }

        if (!rxe)
                goto drop;

        if (skb_linearize(skb)) {
                pr_err("skb_linearize failed\n");
                goto drop;
        }

        udph = udp_hdr(skb);
        pkt->rxe = rxe;
        pkt->port_num = 1;
        pkt->hdr = (u8 *)(udph + 1);
        pkt->mask = RXE_GRH_MASK;
        pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

        rxe_rcv(skb);

        return 0;
drop:
        kfree_skb(skb);
        return 0;
}
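
/* Create a kernel UDP socket bound to the given port and register
 * rxe_udp_encap_recv() as its encapsulation receive handler.
 */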
static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
                                           bool ipv6)
{
        int err;
        struct socket *sock;
        struct udp_port_cfg udp_cfg = { };
        struct udp_tunnel_sock_cfg tnl_cfg = { };

        if (ipv6) {
                udp_cfg.family = AF_INET6;
                udp_cfg.ipv6_v6only = 1;
        } else {
                udp_cfg.family = AF_INET;
        }

        udp_cfg.local_udp_port = port;

        /* Create UDP socket */
        err = udp_sock_create(net, &udp_cfg, &sock);
        if (err < 0) {
                pr_err("failed to create udp socket. err = %d\n", err);
                return ERR_PTR(err);
        }

        tnl_cfg.encap_type = 1;
        tnl_cfg.encap_rcv = rxe_udp_encap_recv;

        /* Setup UDP tunnel */
        setup_udp_tunnel_sock(net, sock, &tnl_cfg);

        return sock;
}

static void rxe_release_udp_tunnel(struct socket *sk)
{
        if (sk)
                udp_tunnel_sock_release(sk);
}
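
/* Push a UDP header onto the skb; the UDP checksum is left zero */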
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
                            __be16 dst_port)
{
        struct udphdr *udph;

        __skb_push(skb, sizeof(*udph));
        skb_reset_transport_header(skb);
        udph = udp_hdr(skb);

        udph->dest = dst_port;
        udph->source = src_port;
        udph->len = htons(skb->len);
        udph->check = 0;
}
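
/* Push an IPv4 (or, below, IPv6) header onto the skb and attach the
 * route that was resolved for it.
 */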
static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
                             __be32 saddr, __be32 daddr, __u8 proto,
                             __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
        struct iphdr *iph;

        skb_scrub_packet(skb, xnet);

        skb_clear_hash(skb);
        skb_dst_set(skb, dst_clone(dst));
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);

        iph = ip_hdr(skb);

        iph->version = IPVERSION;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->frag_off = df;
        iph->protocol = proto;
        iph->tos = tos;
        iph->daddr = daddr;
        iph->saddr = saddr;
        iph->ttl = ttl;
        __ip_select_ident(dev_net(dst->dev), iph,
                          skb_shinfo(skb)->gso_segs ?: 1);
        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
}

static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
                             struct in6_addr *saddr, struct in6_addr *daddr,
                             __u8 proto, __u8 prio, __u8 ttl)
{
        struct ipv6hdr *ip6h;

        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
                              | IPSKB_REROUTED);
        skb_dst_set(skb, dst_clone(dst));

        __skb_push(skb, sizeof(*ip6h));
        skb_reset_network_header(skb);
        ip6h = ipv6_hdr(skb);
        ip6_flow_hdr(ip6h, prio, htonl(0));
        ip6h->payload_len = htons(skb->len);
        ip6h->nexthdr = proto;
        ip6h->hop_limit = ttl;
        ip6h->daddr = *daddr;
        ip6h->saddr = *saddr;
        ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
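
/* Resolve a route and push the UDP and IP headers for an outgoing
 * packet; identical source and destination addresses mark the packet
 * for loopback delivery.
 */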
static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                    struct sk_buff *skb, struct rxe_av *av)
{
        struct rxe_qp *qp = pkt->qp;
        struct dst_entry *dst;
        bool xnet = false;
        __be16 df = htons(IP_DF);
        struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
        struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;

        dst = rxe_find_route(rxe, qp, av);
        if (!dst) {
                pr_err("Host not reachable\n");
                return -EHOSTUNREACH;
        }

        if (!memcmp(saddr, daddr, sizeof(*daddr)))
                pkt->mask |= RXE_LOOPBACK_MASK;

        prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
                        htons(ROCE_V2_UDP_DPORT));

        prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
                         av->grh.traffic_class, av->grh.hop_limit, df, xnet);

        dst_release(dst);
        return 0;
}

static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                    struct sk_buff *skb, struct rxe_av *av)
{
        struct rxe_qp *qp = pkt->qp;
        struct dst_entry *dst;
        struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
        struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;

        dst = rxe_find_route(rxe, qp, av);
        if (!dst) {
                pr_err("Host not reachable\n");
                return -EHOSTUNREACH;
        }

        if (!memcmp(saddr, daddr, sizeof(*daddr)))
                pkt->mask |= RXE_LOOPBACK_MASK;

        prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
                        htons(ROCE_V2_UDP_DPORT));

        prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
                         av->grh.traffic_class,
                         av->grh.hop_limit);

        dst_release(dst);
        return 0;
}
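
/* Build the network headers for an outgoing packet and return the
 * partial ICRC computed over those headers.
 */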
int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                struct sk_buff *skb, u32 *crc)
{
        int err = 0;
        struct rxe_av *av = rxe_get_av(pkt);

        if (av->network_type == RDMA_NETWORK_IPV4)
                err = prepare4(rxe, pkt, skb, av);
        else if (av->network_type == RDMA_NETWORK_IPV6)
                err = prepare6(rxe, pkt, skb, av);

        *crc = rxe_icrc_hdr(pkt, skb);

        return err;
}
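
/* skb destructor for transmitted packets: account the completed send
 * and restart the requester task if it was throttled on in-flight skbs.
 */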
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct rxe_qp *qp = sk->sk_user_data;
        int skb_out = atomic_dec_return(&qp->skb_out);

        if (unlikely(qp->need_req_skb &&
                     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
                rxe_run_task(&qp->req.task, 1);

        rxe_drop_ref(qp);
}
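
/* Hand a fully built packet to the IPv4 or IPv6 output path */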
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
        struct rxe_av *av;
        int err;

        av = rxe_get_av(pkt);

        skb->destructor = rxe_skb_tx_dtor;
        skb->sk = pkt->qp->sk->sk;

        rxe_add_ref(pkt->qp);
        atomic_inc(&pkt->qp->skb_out);

        if (av->network_type == RDMA_NETWORK_IPV4) {
                err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
        } else if (av->network_type == RDMA_NETWORK_IPV6) {
                err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
        } else {
                pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
                atomic_dec(&pkt->qp->skb_out);
                rxe_drop_ref(pkt->qp);
                kfree_skb(skb);
                return -EINVAL;
        }

        if (unlikely(net_xmit_eval(err))) {
                pr_debug("error sending packet: %d\n", err);
                return -EAGAIN;
        }

        return 0;
}

void rxe_loopback(struct sk_buff *skb)
{
        rxe_rcv(skb);
}

static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
{
        return rxe->port.port_guid == av->grh.dgid.global.interface_id;
}
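
/* Allocate and initialize an skb for an outgoing packet, reserving
 * headroom for the Ethernet, IP and UDP headers.
 */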
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
                                int paylen, struct rxe_pkt_info *pkt)
{
        unsigned int hdr_len;
        struct sk_buff *skb;
        struct net_device *ndev;
        const struct ib_gid_attr *attr;
        const int port_num = 1;

        attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
        if (IS_ERR(attr))
                return NULL;
        ndev = attr->ndev;

        if (av->network_type == RDMA_NETWORK_IPV4)
                hdr_len = ETH_HLEN + sizeof(struct udphdr) +
                        sizeof(struct iphdr);
        else
                hdr_len = ETH_HLEN + sizeof(struct udphdr) +
                        sizeof(struct ipv6hdr);

        skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
                        GFP_ATOMIC);

        if (unlikely(!skb))
                goto out;

        skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));

        /* FIXME: hold reference to this netdev until life of this skb. */
        skb->dev = ndev;
        if (av->network_type == RDMA_NETWORK_IPV4)
                skb->protocol = htons(ETH_P_IP);
        else
                skb->protocol = htons(ETH_P_IPV6);

        pkt->rxe = rxe;
        pkt->port_num = port_num;
        pkt->hdr = skb_put_zero(skb, paylen);
        pkt->mask |= RXE_GRH_MASK;

out:
        rdma_put_gid_attr(attr);
        return skb;
}

/*
 * this is required by rxe_cfg to match rxe devices in
 * /sys/class/infiniband up with their underlying ethernet devices
 */
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
        return rxe->ndev->name;
}

enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

struct rxe_dev *rxe_net_add(struct net_device *ndev)
{
        int err;
        struct rxe_dev *rxe = NULL;

        rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
        if (!rxe)
                return NULL;

        rxe->ndev = ndev;

        err = rxe_add(rxe, ndev->mtu);
        if (err) {
                ib_dealloc_device(&rxe->ib_dev);
                return NULL;
        }

        spin_lock_bh(&dev_list_lock);
        list_add_tail(&rxe->list, &rxe_dev_list);
        spin_unlock_bh(&dev_list_lock);
        return rxe;
}
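
/* Tear down every registered rxe device; the list lock is dropped
 * around each rxe_remove() call.
 */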
void rxe_remove_all(void)
{
        spin_lock_bh(&dev_list_lock);
        while (!list_empty(&rxe_dev_list)) {
                struct rxe_dev *rxe =
                        list_first_entry(&rxe_dev_list, struct rxe_dev, list);

                list_del(&rxe->list);
                spin_unlock_bh(&dev_list_lock);
                rxe_remove(rxe);
                spin_lock_bh(&dev_list_lock);
        }
        spin_unlock_bh(&dev_list_lock);
}

static void rxe_port_event(struct rxe_dev *rxe,
                           enum ib_event_type event)
{
        struct ib_event ev;

        ev.device = &rxe->ib_dev;
        ev.element.port_num = 1;
        ev.event = event;

        ib_dispatch_event(&ev);
}

/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
        struct rxe_port *port;

        port = &rxe->port;
        port->attr.state = IB_PORT_ACTIVE;
        port->attr.phys_state = IB_PHYS_STATE_LINK_UP;

        rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
        pr_info("set %s active\n", rxe->ib_dev.name);
}

/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
        struct rxe_port *port;

        port = &rxe->port;
        port->attr.state = IB_PORT_DOWN;
        port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;

        rxe_port_event(rxe, IB_EVENT_PORT_ERR);
        pr_info("set %s down\n", rxe->ib_dev.name);
}
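
/* Netdevice notifier: mirror state changes of the underlying Ethernet
 * device onto the rxe port.
 */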
static int rxe_notify(struct notifier_block *not_blk,
                      unsigned long event,
                      void *arg)
{
        struct net_device *ndev = netdev_notifier_info_to_dev(arg);
        struct rxe_dev *rxe = net_to_rxe(ndev);

        if (!rxe)
                goto out;

        switch (event) {
        case NETDEV_UNREGISTER:
                list_del(&rxe->list);
                rxe_remove(rxe);
                break;
        case NETDEV_UP:
                rxe_port_up(rxe);
                break;
        case NETDEV_DOWN:
                rxe_port_down(rxe);
                break;
        case NETDEV_CHANGEMTU:
                pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
                rxe_set_mtu(rxe, ndev->mtu);
                break;
        case NETDEV_CHANGE:
                if (netif_running(ndev) && netif_carrier_ok(ndev))
                        rxe_port_up(rxe);
                else
                        rxe_port_down(rxe);
                break;
        case NETDEV_REBOOT:
        case NETDEV_GOING_DOWN:
        case NETDEV_CHANGEADDR:
        case NETDEV_CHANGENAME:
        case NETDEV_FEAT_CHANGE:
        default:
                pr_info("ignoring netdev event = %ld for %s\n",
                        event, ndev->name);
                break;
        }
out:
        return NOTIFY_OK;
}

static struct notifier_block rxe_net_notifier = {
        .notifier_call = rxe_notify,
};

static int rxe_net_ipv4_init(void)
{
        recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
                                                htons(ROCE_V2_UDP_DPORT),
                                                false);
        if (IS_ERR(recv_sockets.sk4)) {
                recv_sockets.sk4 = NULL;
                pr_err("Failed to create IPv4 UDP tunnel\n");
                return -1;
        }

        return 0;
}

static int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)
        recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
                                                htons(ROCE_V2_UDP_DPORT),
                                                true);
        if (IS_ERR(recv_sockets.sk6)) {
                recv_sockets.sk6 = NULL;
                pr_err("Failed to create IPv6 UDP tunnel\n");
                return -1;
        }
#endif
        return 0;
}

void rxe_net_exit(void)
{
        rxe_release_udp_tunnel(recv_sockets.sk6);
        rxe_release_udp_tunnel(recv_sockets.sk4);
        unregister_netdevice_notifier(&rxe_net_notifier);
}
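
/* Set up the RoCEv2 UDP tunnel sockets and register the netdevice
 * notifier.
 */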
int rxe_net_init(void)
{
        int err;

        recv_sockets.sk6 = NULL;

        err = rxe_net_ipv4_init();
        if (err)
                return err;
        err = rxe_net_ipv6_init();
        if (err)
                goto err_out;
        err = register_netdevice_notifier(&rxe_net_notifier);
        if (err) {
                pr_err("Failed to register netdev notifier\n");
                goto err_out;
        }
        return 0;
err_out:
        rxe_net_exit();
        return err;
}