vport.c

/*
 * Copyright (c) 2007-2014 Nicira, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <net/net_namespace.h>
#include <linux/module.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"

static void ovs_vport_record_error(struct vport *,
				   enum vport_err_type err_type);

static LIST_HEAD(vport_ops_list);

/* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024

/**
 * ovs_vport_init - initialize vport subsystem
 *
 * Called at module load time to initialize the vport subsystem.
 */
int ovs_vport_init(void)
{
	dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
			    GFP_KERNEL);
	if (!dev_table)
		return -ENOMEM;

	return 0;
}

/**
 * ovs_vport_exit - shutdown vport subsystem
 *
 * Called at module exit time to shutdown the vport subsystem.
 */
void ovs_vport_exit(void)
{
	kfree(dev_table);
}

static struct hlist_head *hash_bucket(const struct net *net, const char *name)
{
	unsigned int hash = jhash(name, strlen(name), (unsigned long) net);

	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
}

int ovs_vport_ops_register(struct vport_ops *ops)
{
	int err = -EEXIST;
	struct vport_ops *o;

	ovs_lock();
	list_for_each_entry(o, &vport_ops_list, list)
		if (ops->type == o->type)
			goto errout;

	list_add_tail(&ops->list, &vport_ops_list);
	err = 0;
errout:
	ovs_unlock();
	return err;
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_register);

void ovs_vport_ops_unregister(struct vport_ops *ops)
{
	ovs_lock();
	list_del(&ops->list);
	ovs_unlock();
}
EXPORT_SYMBOL_GPL(ovs_vport_ops_unregister);
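
/* A minimal sketch, not part of the original file, of how a vport
 * implementation module is expected to use ovs_vport_ops_register()/
 * ovs_vport_ops_unregister() above.  The sample_* callbacks and the
 * OVS_VPORT_TYPE_UNSPEC placeholder are hypothetical; in-tree users such as
 * vport-gre.c and vport-geneve.c follow this pattern with real callbacks.
 */
#if 0	/* illustration only, not compiled */
static struct vport_ops ovs_sample_vport_ops = {
	.type		= OVS_VPORT_TYPE_UNSPEC,	/* placeholder type */
	.create		= sample_create,		/* hypothetical callbacks */
	.destroy	= sample_destroy,
	.get_name	= sample_get_name,
	.send		= sample_send,
	.owner		= THIS_MODULE,
};

static int __init sample_vport_init(void)
{
	/* Registering a type that is already on vport_ops_list fails with
	 * -EEXIST, see ovs_vport_ops_register() above.
	 */
	return ovs_vport_ops_register(&ovs_sample_vport_ops);
}

static void __exit sample_vport_exit(void)
{
	ovs_vport_ops_unregister(&ovs_sample_vport_ops);
}
#endif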

/**
 * ovs_vport_locate - find a port that has already been created
 *
 * @net: network namespace the port belongs to
 * @name: name of port to find
 *
 * Must be called with ovs or RCU read lock.
 */
struct vport *ovs_vport_locate(const struct net *net, const char *name)
{
	struct hlist_head *bucket = hash_bucket(net, name);
	struct vport *vport;

	hlist_for_each_entry_rcu(vport, bucket, hash_node)
		if (!strcmp(name, vport->ops->get_name(vport)) &&
		    net_eq(ovs_dp_get_net(vport->dp), net))
			return vport;

	return NULL;
}

/**
 * ovs_vport_alloc - allocate and initialize new vport
 *
 * @priv_size: Size of private data area to allocate.
 * @ops: vport device ops
 * @parms: Information about the new vport.
 *
 * Allocate and initialize a new vport defined by @ops.  The vport will contain
 * a private data area of size @priv_size that can be accessed using
 * vport_priv().  vports that are no longer needed should be released with
 * vport_free().
 */
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
			      const struct vport_parms *parms)
{
	struct vport *vport;
	size_t alloc_size;

	alloc_size = sizeof(struct vport);
	if (priv_size) {
		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
		alloc_size += priv_size;
	}

	vport = kzalloc(alloc_size, GFP_KERNEL);
	if (!vport)
		return ERR_PTR(-ENOMEM);

	vport->dp = parms->dp;
	vport->port_no = parms->port_no;
	vport->ops = ops;
	INIT_HLIST_NODE(&vport->dp_hash_node);

	if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
		kfree(vport);
		return ERR_PTR(-EINVAL);
	}

	vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!vport->percpu_stats) {
		kfree(vport);
		return ERR_PTR(-ENOMEM);
	}

	return vport;
}
EXPORT_SYMBOL_GPL(ovs_vport_alloc);
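
/* Sketch of how a ->create() callback typically pairs with ovs_vport_alloc():
 * it requests a private area, fills it in through vport_priv(), and returns
 * the vport or an ERR_PTR.  "struct sample_priv" and sample_create() are
 * hypothetical (they reuse the sample ops sketched earlier); see
 * vport-internal_dev.c for a real implementation of this pattern.
 */
#if 0	/* illustration only, not compiled */
struct sample_priv {
	char name[IFNAMSIZ];
};

static struct vport *sample_create(const struct vport_parms *parms)
{
	struct vport *vport;
	struct sample_priv *priv;

	vport = ovs_vport_alloc(sizeof(struct sample_priv),
				&ovs_sample_vport_ops, parms);
	if (IS_ERR(vport))
		return vport;

	/* vport_priv() returns the private area allocated above. */
	priv = vport_priv(vport);
	strncpy(priv->name, parms->name, IFNAMSIZ - 1);

	return vport;
}
#endif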

/**
 * ovs_vport_free - uninitialize and free vport
 *
 * @vport: vport to free
 *
 * Frees a vport allocated with vport_alloc() when it is no longer needed.
 *
 * The caller must ensure that an RCU grace period has passed since the last
 * time @vport was in a datapath.
 */
void ovs_vport_free(struct vport *vport)
{
	/* vport is freed from an RCU callback or an error path; therefore
	 * it is safe to use a raw dereference here.
	 */
	kfree(rcu_dereference_raw(vport->upcall_portids));
	free_percpu(vport->percpu_stats);
	kfree(vport);
}
EXPORT_SYMBOL_GPL(ovs_vport_free);

static struct vport_ops *ovs_vport_lookup(const struct vport_parms *parms)
{
	struct vport_ops *ops;

	list_for_each_entry(ops, &vport_ops_list, list)
		if (ops->type == parms->type)
			return ops;

	return NULL;
}

/**
 * ovs_vport_add - add vport device (for kernel callers)
 *
 * @parms: Information about new vport.
 *
 * Creates a new vport with the specified configuration (which is dependent on
 * device type).  ovs_mutex must be held.
 */
struct vport *ovs_vport_add(const struct vport_parms *parms)
{
	struct vport_ops *ops;
	struct vport *vport;

	ops = ovs_vport_lookup(parms);
	if (ops) {
		struct hlist_head *bucket;

		if (!try_module_get(ops->owner))
			return ERR_PTR(-EAFNOSUPPORT);

		vport = ops->create(parms);
		if (IS_ERR(vport)) {
			module_put(ops->owner);
			return vport;
		}

		bucket = hash_bucket(ovs_dp_get_net(vport->dp),
				     vport->ops->get_name(vport));
		hlist_add_head_rcu(&vport->hash_node, bucket);
		return vport;
	}

	/* Unlock to attempt module load and return -EAGAIN if load
	 * was successful as we need to restart the port addition
	 * workflow.
	 */
	ovs_unlock();
	request_module("vport-type-%d", parms->type);
	ovs_lock();

	if (!ovs_vport_lookup(parms))
		return ERR_PTR(-EAFNOSUPPORT);
	else
		return ERR_PTR(-EAGAIN);
}
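
/* Sketch of the restart behaviour that the -EAGAIN comment above relies on:
 * a caller holding ovs_mutex simply retries the add after ovs_vport_add()
 * dropped the lock to load a vport module.  This is a simplified, hypothetical
 * rendering of what ovs_vport_cmd_new() does in datapath.c.
 */
#if 0	/* illustration only, not compiled */
static struct vport *sample_add_port(const struct vport_parms *parms)
{
	struct vport *vport;

	do {
		vport = ovs_vport_add(parms);
	} while (IS_ERR(vport) && PTR_ERR(vport) == -EAGAIN);

	return vport;
}
#endif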

/**
 * ovs_vport_set_options - modify existing vport device (for kernel callers)
 *
 * @vport: vport to modify.
 * @options: New configuration.
 *
 * Modifies an existing device with the specified configuration (which is
 * dependent on device type).  ovs_mutex must be held.
 */
int ovs_vport_set_options(struct vport *vport, struct nlattr *options)
{
	if (!vport->ops->set_options)
		return -EOPNOTSUPP;
	return vport->ops->set_options(vport, options);
}

/**
 * ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it.  It is possible to fail
 * for reasons such as lack of memory.  ovs_mutex must be held.
 */
void ovs_vport_del(struct vport *vport)
{
	ASSERT_OVSL();

	hlist_del_rcu(&vport->hash_node);
	module_put(vport->ops->owner);
	vport->ops->destroy(vport);
}

/**
 * ovs_vport_get_stats - retrieve device stats
 *
 * @vport: vport from which to retrieve the stats
 * @stats: location to store stats
 *
 * Retrieves transmit, receive, and error stats for the given device.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
void ovs_vport_get_stats(struct vport *vport, struct ovs_vport_stats *stats)
{
	int i;

	memset(stats, 0, sizeof(*stats));

	/* We potentially have 2 sources of stats that need to be combined:
	 * those we have collected (split into err_stats and percpu_stats) from
	 * set_stats() and device error stats from netdev->get_stats() (for
	 * errors that happen downstream and therefore aren't reported through
	 * our vport_record_error() function).
	 * Stats from the first source are reported by ovs (OVS_VPORT_ATTR_STATS).
	 * netdev stats can be read directly over netlink/ioctl.
	 */

	stats->rx_errors  = atomic_long_read(&vport->err_stats.rx_errors);
	stats->tx_errors  = atomic_long_read(&vport->err_stats.tx_errors);
	stats->tx_dropped = atomic_long_read(&vport->err_stats.tx_dropped);
	stats->rx_dropped = atomic_long_read(&vport->err_stats.rx_dropped);

	for_each_possible_cpu(i) {
		const struct pcpu_sw_netstats *percpu_stats;
		struct pcpu_sw_netstats local_stats;
		unsigned int start;

		percpu_stats = per_cpu_ptr(vport->percpu_stats, i);

		do {
			start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
			local_stats = *percpu_stats;
		} while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));

		stats->rx_bytes		+= local_stats.rx_bytes;
		stats->rx_packets	+= local_stats.rx_packets;
		stats->tx_bytes		+= local_stats.tx_bytes;
		stats->tx_packets	+= local_stats.tx_packets;
	}
}

/**
 * ovs_vport_get_options - retrieve device options
 *
 * @vport: vport from which to retrieve the options.
 * @skb: sk_buff where options should be appended.
 *
 * Retrieves the configuration of the given device, appending an
 * %OVS_VPORT_ATTR_OPTIONS attribute that in turn contains nested
 * vport-specific attributes to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room, or another
 * negative error code if a real error occurred.  If an error occurs, @skb is
 * left unmodified.
 *
 * Must be called with ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
{
	struct nlattr *nla;
	int err;

	if (!vport->ops->get_options)
		return 0;

	nla = nla_nest_start(skb, OVS_VPORT_ATTR_OPTIONS);
	if (!nla)
		return -EMSGSIZE;

	err = vport->ops->get_options(vport, skb);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	}

	nla_nest_end(skb, nla);
	return 0;
}
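
/* Sketch of the ->get_options() side of the contract above: the callback only
 * appends its own nested attributes and returns -EMSGSIZE on failure, while
 * the OVS_VPORT_ATTR_OPTIONS nest start/cancel/end bookkeeping stays in
 * ovs_vport_get_options().  sample_get_options() and its option value are
 * hypothetical; vport-vxlan.c does the same with OVS_TUNNEL_ATTR_DST_PORT.
 */
#if 0	/* illustration only, not compiled */
static int sample_get_options(const struct vport *vport, struct sk_buff *skb)
{
	u16 dst_port = 4789;	/* hypothetical option value */

	if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, dst_port))
		return -EMSGSIZE;

	return 0;
}
#endif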

/**
 * ovs_vport_set_upcall_portids - set upcall portids of @vport.
 *
 * @vport: vport to modify.
 * @ids: new configuration, an array of port ids.
 *
 * Sets the vport's upcall_portids to @ids.
 *
 * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
 * as an array of u32.
 *
 * Must be called with ovs_mutex.
 */
int ovs_vport_set_upcall_portids(struct vport *vport, const struct nlattr *ids)
{
	struct vport_portids *old, *vport_portids;

	if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
		return -EINVAL;

	old = ovsl_dereference(vport->upcall_portids);

	vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
				GFP_KERNEL);
	if (!vport_portids)
		return -ENOMEM;

	vport_portids->n_ids = nla_len(ids) / sizeof(u32);
	vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
	nla_memcpy(vport_portids->ids, ids, nla_len(ids));

	rcu_assign_pointer(vport->upcall_portids, vport_portids);

	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

/**
 * ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
 *
 * @vport: vport from which to retrieve the portids.
 * @skb: sk_buff where portids should be appended.
 *
 * Retrieves the configuration of the given vport, appending the
 * %OVS_VPORT_ATTR_UPCALL_PID attribute which is the array of upcall
 * portids to @skb.
 *
 * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
 * If an error occurs, @skb is left unmodified.  Must be called with
 * ovs_mutex or rcu_read_lock.
 */
int ovs_vport_get_upcall_portids(const struct vport *vport,
				 struct sk_buff *skb)
{
	struct vport_portids *ids;

	ids = rcu_dereference_ovsl(vport->upcall_portids);

	if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
		return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
			       ids->n_ids * sizeof(u32), (void *)ids->ids);
	else
		return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
}

/**
 * ovs_vport_find_upcall_portid - find the upcall portid to send upcall.
 *
 * @vport: vport from which the missed packet is received.
 * @skb: skb on which the missed packet was received.
 *
 * Uses skb_get_hash() to select the upcall portid to send the
 * upcall.
 *
 * Returns the portid of the target socket.  Must be called with rcu_read_lock.
 */
u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
{
	struct vport_portids *ids;
	u32 ids_index;
	u32 hash;

	ids = rcu_dereference(vport->upcall_portids);

	if (ids->n_ids == 1 && ids->ids[0] == 0)
		return 0;

	hash = skb_get_hash(skb);
	/* Computes hash % n_ids using the precomputed reciprocal of n_ids. */
	ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
	return ids->ids[ids_index];
}

/**
 * ovs_vport_receive - pass up received packet to the datapath for processing
 *
 * @vport: vport that received the packet
 * @skb: skb that was received
 * @tun_info: tunnel info (if any) that carried the packet
 *
 * Must be called with rcu_read_lock.  The packet cannot be shared and
 * skb->data should point to the Ethernet header.
 */
void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
		       const struct ovs_tunnel_info *tun_info)
{
	struct pcpu_sw_netstats *stats;
	struct sw_flow_key key;
	int error;

	stats = this_cpu_ptr(vport->percpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len +
			   (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
	u64_stats_update_end(&stats->syncp);

	OVS_CB(skb)->input_vport = vport;
	OVS_CB(skb)->egress_tun_info = NULL;
	/* Extract flow from 'skb' into 'key'. */
	error = ovs_flow_key_extract(tun_info, skb, &key);
	if (unlikely(error)) {
		kfree_skb(skb);
		return;
	}
	ovs_dp_process_packet(skb, &key);
}
EXPORT_SYMBOL_GPL(ovs_vport_receive);
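
/* Sketch of a receive-side caller of ovs_vport_receive(): a hypothetical
 * per-device rx hook that, under rcu_read_lock, restores skb->data to the
 * Ethernet header and hands the packet to the datapath with no tunnel info.
 * Non-tunnel ports (see vport-netdev.c) follow this shape.
 */
#if 0	/* illustration only, not compiled */
static void sample_port_receive(struct vport *vport, struct sk_buff *skb)
{
	/* The caller owns the skb and holds rcu_read_lock, as required by
	 * ovs_vport_receive().
	 */
	skb_push(skb, ETH_HLEN);
	ovs_vport_receive(vport, skb, NULL);
}
#endif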

/**
 * ovs_vport_send - send a packet on a device
 *
 * @vport: vport on which to send the packet
 * @skb: skb to send
 *
 * Sends the given packet and returns the length of data sent.  Either ovs
 * lock or rcu_read_lock must be held.
 */
int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
	int sent = vport->ops->send(vport, skb);

	if (likely(sent > 0)) {
		struct pcpu_sw_netstats *stats;

		stats = this_cpu_ptr(vport->percpu_stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += sent;
		u64_stats_update_end(&stats->syncp);
	} else if (sent < 0) {
		ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
	} else {
		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
	}
	return sent;
}
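
/* Sketch of the ->send() contract consumed above: the callback transmits the
 * skb and returns the number of bytes handed to the device (or a negative
 * errno), which ovs_vport_send() folds into tx_packets/tx_bytes or the error
 * counters.  sample_get_netdev() is a hypothetical helper; vport-netdev.c
 * implements the real thing.
 */
#if 0	/* illustration only, not compiled */
static int sample_send(struct vport *vport, struct sk_buff *skb)
{
	int len = skb->len;

	skb->dev = sample_get_netdev(vport);	/* hypothetical lookup */
	dev_queue_xmit(skb);

	return len;
}
#endif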

/**
 * ovs_vport_record_error - indicate device error to generic stats layer
 *
 * @vport: vport that encountered the error
 * @err_type: one of enum vport_err_type types to indicate the error type
 *
 * If using the vport generic stats layer indicate that an error of the given
 * type has occurred.
 */
static void ovs_vport_record_error(struct vport *vport,
				   enum vport_err_type err_type)
{
	switch (err_type) {
	case VPORT_E_RX_DROPPED:
		atomic_long_inc(&vport->err_stats.rx_dropped);
		break;

	case VPORT_E_RX_ERROR:
		atomic_long_inc(&vport->err_stats.rx_errors);
		break;

	case VPORT_E_TX_DROPPED:
		atomic_long_inc(&vport->err_stats.tx_dropped);
		break;

	case VPORT_E_TX_ERROR:
		atomic_long_inc(&vport->err_stats.tx_errors);
		break;
	}
}

static void free_vport_rcu(struct rcu_head *rcu)
{
	struct vport *vport = container_of(rcu, struct vport, rcu);

	ovs_vport_free(vport);
}

void ovs_vport_deferred_free(struct vport *vport)
{
	if (!vport)
		return;

	call_rcu(&vport->rcu, free_vport_rcu);
}
EXPORT_SYMBOL_GPL(ovs_vport_deferred_free);

int ovs_tunnel_get_egress_info(struct ovs_tunnel_info *egress_tun_info,
			       struct net *net,
			       const struct ovs_tunnel_info *tun_info,
			       u8 ipproto,
			       u32 skb_mark,
			       __be16 tp_src,
			       __be16 tp_dst)
{
	const struct ovs_key_ipv4_tunnel *tun_key;
	struct rtable *rt;
	struct flowi4 fl;

	if (unlikely(!tun_info))
		return -EINVAL;

	tun_key = &tun_info->tunnel;

	/* Route lookup to get the source IP address.
	 * This may need to change if the corresponding logic in the
	 * vport ops changes.
	 */
	rt = ovs_tunnel_route_lookup(net, tun_key, skb_mark, &fl, ipproto);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);

	/* Generate egress_tun_info based on tun_info,
	 * saddr, tp_src and tp_dst
	 */
	__ovs_flow_tun_info_init(egress_tun_info,
				 fl.saddr, tun_key->ipv4_dst,
				 tun_key->ipv4_tos,
				 tun_key->ipv4_ttl,
				 tp_src, tp_dst,
				 tun_key->tun_id,
				 tun_key->tun_flags,
				 tun_info->options,
				 tun_info->options_len);

	return 0;
}
EXPORT_SYMBOL_GPL(ovs_tunnel_get_egress_info);

int ovs_vport_get_egress_tun_info(struct vport *vport, struct sk_buff *skb,
				  struct ovs_tunnel_info *info)
{
	/* get_egress_tun_info() is only implemented on tunnel ports. */
	if (unlikely(!vport->ops->get_egress_tun_info))
		return -EINVAL;

	return vport->ops->get_egress_tun_info(vport, skb, info);
}