core_priv.h

/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef _CORE_PRIV_H
#define _CORE_PRIV_H

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/cgroup_rdma.h>

#include <rdma/ib_verbs.h>
#include <rdma/opa_addr.h>
#include <rdma/ib_mad.h>
#include <rdma/restrack.h>

#include "mad_priv.h"
/* Total number of ports combined across all struct ib_device instances */
#define RDMA_MAX_PORTS 1024
struct pkey_index_qp_list {
	struct list_head    pkey_index_list;
	u16                 pkey_index;
	/* Lock to hold while iterating the qp_list. */
	spinlock_t          qp_list_lock;
	struct list_head    qp_list;
};
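
/*
 * Editorial note: pkey_index_qp_list appears to serve as a per-port bucket
 * grouping the QPs that currently use a given P_Key index, so the security
 * code can walk them when the P_Key table changes.  A minimal iteration
 * sketch (the concrete node type of qp_list is defined elsewhere, so only
 * the generic list walk is shown):
 *
 *	struct list_head *pos;
 *
 *	spin_lock(&entry->qp_list_lock);
 *	list_for_each(pos, &entry->qp_list)
 *		... act on one QP bound to this P_Key index ...
 *	spin_unlock(&entry->qp_list_lock);
 */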
#if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS)
int cma_configfs_init(void);
void cma_configfs_exit(void);
#else
static inline int cma_configfs_init(void)
{
	return 0;
}

static inline void cma_configfs_exit(void)
{
}
#endif
struct cma_device;
void cma_ref_dev(struct cma_device *cma_dev);
void cma_deref_dev(struct cma_device *cma_dev);
typedef bool (*cma_device_filter)(struct ib_device *, void *);
struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
					     void *cookie);
int cma_get_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port);
int cma_set_default_gid_type(struct cma_device *cma_dev,
			     unsigned int port,
			     enum ib_gid_type default_gid_type);
int cma_get_default_roce_tos(struct cma_device *cma_dev, unsigned int port);
int cma_set_default_roce_tos(struct cma_device *cma_dev, unsigned int port,
			     u8 default_roce_tos);
struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev);

int  ib_device_register_sysfs(struct ib_device *device,
			      int (*port_callback)(struct ib_device *,
						   u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);

typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
	      struct net_device *idev, void *cookie);

typedef bool (*roce_netdev_filter)(struct ib_device *device, u8 port,
				   struct net_device *idev, void *cookie);

void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie);
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie);
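
/*
 * Editorial note: the two enumeration helpers above visit each RoCE-capable
 * (device, port, netdev) combination, call the filter on it, and invoke the
 * callback only where the filter returned true.  A hedged illustration of a
 * matching filter/callback pair; the function names below are hypothetical
 * and not part of this header:
 *
 *	static bool match_any(struct ib_device *device, u8 port,
 *			      struct net_device *idev, void *cookie)
 *	{
 *		return true;
 *	}
 *
 *	static void count_one(struct ib_device *device, u8 port,
 *			      struct net_device *idev, void *cookie)
 *	{
 *		(*(unsigned int *)cookie)++;
 *	}
 *
 *	unsigned int n = 0;
 *	ib_enum_all_roce_netdevs(match_any, NULL, count_one, &n);
 */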
typedef int (*nldev_callback)(struct ib_device *device,
			      struct sk_buff *skb,
			      struct netlink_callback *cb,
			      unsigned int idx);

int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb);
enum ib_cache_gid_default_mode {
	IB_CACHE_GID_DEFAULT_MODE_SET,
	IB_CACHE_GID_DEFAULT_MODE_DELETE
};
int ib_cache_gid_parse_type_str(const char *buf);

const char *ib_cache_gid_type_str(enum ib_gid_type gid_type);

void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
				  struct net_device *ndev,
				  unsigned long gid_type_mask,
				  enum ib_cache_gid_default_mode mode);

int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
		     union ib_gid *gid, struct ib_gid_attr *attr);

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
				     struct net_device *ndev);

int roce_gid_mgmt_init(void);
void roce_gid_mgmt_cleanup(void);

unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);

int ib_cache_setup_one(struct ib_device *device);
void ib_cache_cleanup_one(struct ib_device *device);
void ib_cache_release_one(struct ib_device *device);
#ifdef CONFIG_CGROUP_RDMA
int ib_device_register_rdmacg(struct ib_device *device);
void ib_device_unregister_rdmacg(struct ib_device *device);

int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
			 struct ib_device *device,
			 enum rdmacg_resource_type resource_index);

void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
			struct ib_device *device,
			enum rdmacg_resource_type resource_index);
#else
static inline int ib_device_register_rdmacg(struct ib_device *device)
{ return 0; }

static inline void ib_device_unregister_rdmacg(struct ib_device *device)
{ }

static inline int ib_rdmacg_try_charge(struct ib_rdmacg_object *cg_obj,
				       struct ib_device *device,
				       enum rdmacg_resource_type resource_index)
{ return 0; }

static inline void ib_rdmacg_uncharge(struct ib_rdmacg_object *cg_obj,
				      struct ib_device *device,
				      enum rdmacg_resource_type resource_index)
{ }
#endif
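
/*
 * Editorial note: the rdma cgroup helpers above are meant to be used as a
 * charge/uncharge pair bracketing the lifetime of a charged object, roughly
 * along these lines (a sketch assuming the HCA-object resource type from
 * <linux/cgroup_rdma.h>):
 *
 *	ret = ib_rdmacg_try_charge(&cg_obj, device, RDMACG_RESOURCE_HCA_OBJECT);
 *	if (ret)
 *		return ret;
 *	... create the object ...
 *
 *	// on creation failure, or when the object is destroyed:
 *	ib_rdmacg_uncharge(&cg_obj, device, RDMACG_RESOURCE_HCA_OBJECT);
 */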
static inline bool rdma_is_upper_dev_rcu(struct net_device *dev,
					 struct net_device *upper)
{
	return netdev_has_upper_dev_all_rcu(dev, upper);
}
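
/*
 * Editorial note: netdev_has_upper_dev_all_rcu() walks the upper-device
 * list under RCU, so callers of rdma_is_upper_dev_rcu() should hold
 * rcu_read_lock() (or the RTNL lock) across the call, for example:
 *
 *	rcu_read_lock();
 *	if (rdma_is_upper_dev_rcu(ndev, upper_ndev))
 *		... handle the upper device (hypothetical caller code) ...
 *	rcu_read_unlock();
 */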
int addr_init(void);
void addr_cleanup(void);

int ib_mad_init(void);
void ib_mad_cleanup(void);

int ib_sa_init(void);
void ib_sa_cleanup(void);

int rdma_nl_init(void);
void rdma_nl_exit(void);

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack);
int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack);
int ib_nl_handle_ip_res_resp(struct sk_buff *skb,
			     struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack);

int ib_get_cached_subnet_prefix(struct ib_device *device,
				u8 port_num,
				u64 *sn_pfx);
#ifdef CONFIG_SECURITY_INFINIBAND
void ib_security_destroy_port_pkey_list(struct ib_device *device);

void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix);
int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata);

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_destroy_qp_security_begin(struct ib_qp_security *sec);
void ib_destroy_qp_security_abort(struct ib_qp_security *sec);
void ib_destroy_qp_security_end(struct ib_qp_security *sec);
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev);
void ib_close_shared_qp_security(struct ib_qp_security *sec);
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type);
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent);
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index);
#else
static inline void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
}

static inline void ib_security_cache_change(struct ib_device *device,
					    u8 port_num,
					    u64 subnet_prefix)
{
}

static inline int ib_security_modify_qp(struct ib_qp *qp,
					struct ib_qp_attr *qp_attr,
					int qp_attr_mask,
					struct ib_udata *udata)
{
	return qp->device->modify_qp(qp->real_qp,
				     qp_attr,
				     qp_attr_mask,
				     udata);
}

static inline int ib_create_qp_security(struct ib_qp *qp,
					struct ib_device *dev)
{
	return 0;
}

static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
}

static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
}

static inline int ib_open_shared_qp_security(struct ib_qp *qp,
					     struct ib_device *dev)
{
	return 0;
}

static inline void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
}

static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
					      enum ib_qp_type qp_type)
{
	return 0;
}

static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
}

static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
					  u16 pkey_index)
{
	return 0;
}
#endif
struct ib_device *ib_device_get_by_index(u32 ifindex);

/* RDMA device netlink */
void nldev_init(void);
void nldev_exit(void);

static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
					  struct ib_pd *pd,
					  struct ib_qp_init_attr *attr,
					  struct ib_udata *udata,
					  struct ib_uobject *uobj)
{
	struct ib_qp *qp;

	if (!dev->create_qp)
		return ERR_PTR(-EOPNOTSUPP);

	qp = dev->create_qp(pd, attr, udata);
	if (IS_ERR(qp))
		return qp;

	qp->device = dev;
	qp->pd = pd;
	qp->uobject = uobj;
	/*
	 * We don't track XRC QPs for now, because they don't have PD
	 * and more importantly they are created internally by the driver,
	 * see mlx5 create_dev_resources() as an example.
	 */
	if (attr->qp_type < IB_QPT_XRC_INI) {
		qp->res.type = RDMA_RESTRACK_QP;
		rdma_restrack_add(&qp->res);
	} else {
		qp->res.valid = false;
	}

	return qp;
}
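
/*
 * Editorial note: _ib_create_qp() looks like the shared backend for the
 * kernel-verbs and uverbs QP creation paths; it only fills in the fields
 * common to both callers and registers non-XRC QPs with restrack, leaving
 * the remaining initialization to the caller.  A sketch of a call site,
 * with error handling reduced to the minimum:
 *
 *	qp = _ib_create_qp(pd->device, pd, init_attr, NULL, NULL);
 *	if (IS_ERR(qp))
 *		return qp;
 */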
struct rdma_dev_addr;

int rdma_resolve_ip_route(struct sockaddr *src_addr,
			  const struct sockaddr *dst_addr,
			  struct rdma_dev_addr *addr);

int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
				 const union ib_gid *dgid,
				 u8 *dmac, const struct net_device *ndev,
				 int *hoplimit);

#endif /* _CORE_PRIV_H */