/* net/packet/diag.c — sock_diag (NETLINK_SOCK_DIAG) handler for AF_PACKET sockets. */
/* Kernel headers for module init, netlink sock_diag plumbing and net namespaces. */
#include <linux/module.h>
#include <linux/sock_diag.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/packet_diag.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>
#include <net/sock.h>

/* Private AF_PACKET definitions: struct packet_sock, fanout_mutex, etc. */
#include "internal.h"
  10. static int pdiag_put_info(const struct packet_sock *po, struct sk_buff *nlskb)
  11. {
  12. struct packet_diag_info pinfo;
  13. pinfo.pdi_index = po->ifindex;
  14. pinfo.pdi_version = po->tp_version;
  15. pinfo.pdi_reserve = po->tp_reserve;
  16. pinfo.pdi_copy_thresh = po->copy_thresh;
  17. pinfo.pdi_tstamp = po->tp_tstamp;
  18. pinfo.pdi_flags = 0;
  19. if (po->running)
  20. pinfo.pdi_flags |= PDI_RUNNING;
  21. if (po->auxdata)
  22. pinfo.pdi_flags |= PDI_AUXDATA;
  23. if (po->origdev)
  24. pinfo.pdi_flags |= PDI_ORIGDEV;
  25. if (po->has_vnet_hdr)
  26. pinfo.pdi_flags |= PDI_VNETHDR;
  27. if (po->tp_loss)
  28. pinfo.pdi_flags |= PDI_LOSS;
  29. return nla_put(nlskb, PACKET_DIAG_INFO, sizeof(pinfo), &pinfo);
  30. }
  31. static int pdiag_put_mclist(const struct packet_sock *po, struct sk_buff *nlskb)
  32. {
  33. struct nlattr *mca;
  34. struct packet_mclist *ml;
  35. mca = nla_nest_start(nlskb, PACKET_DIAG_MCLIST);
  36. if (!mca)
  37. return -EMSGSIZE;
  38. rtnl_lock();
  39. for (ml = po->mclist; ml; ml = ml->next) {
  40. struct packet_diag_mclist *dml;
  41. dml = nla_reserve_nohdr(nlskb, sizeof(*dml));
  42. if (!dml) {
  43. rtnl_unlock();
  44. nla_nest_cancel(nlskb, mca);
  45. return -EMSGSIZE;
  46. }
  47. dml->pdmc_index = ml->ifindex;
  48. dml->pdmc_type = ml->type;
  49. dml->pdmc_alen = ml->alen;
  50. dml->pdmc_count = ml->count;
  51. BUILD_BUG_ON(sizeof(dml->pdmc_addr) != sizeof(ml->addr));
  52. memcpy(dml->pdmc_addr, ml->addr, sizeof(ml->addr));
  53. }
  54. rtnl_unlock();
  55. nla_nest_end(nlskb, mca);
  56. return 0;
  57. }
  58. static int pdiag_put_ring(struct packet_ring_buffer *ring, int ver, int nl_type,
  59. struct sk_buff *nlskb)
  60. {
  61. struct packet_diag_ring pdr;
  62. if (!ring->pg_vec)
  63. return 0;
  64. pdr.pdr_block_size = ring->pg_vec_pages << PAGE_SHIFT;
  65. pdr.pdr_block_nr = ring->pg_vec_len;
  66. pdr.pdr_frame_size = ring->frame_size;
  67. pdr.pdr_frame_nr = ring->frame_max + 1;
  68. if (ver > TPACKET_V2) {
  69. pdr.pdr_retire_tmo = ring->prb_bdqc.retire_blk_tov;
  70. pdr.pdr_sizeof_priv = ring->prb_bdqc.blk_sizeof_priv;
  71. pdr.pdr_features = ring->prb_bdqc.feature_req_word;
  72. } else {
  73. pdr.pdr_retire_tmo = 0;
  74. pdr.pdr_sizeof_priv = 0;
  75. pdr.pdr_features = 0;
  76. }
  77. return nla_put(nlskb, nl_type, sizeof(pdr), &pdr);
  78. }
  79. static int pdiag_put_rings_cfg(struct packet_sock *po, struct sk_buff *skb)
  80. {
  81. int ret;
  82. mutex_lock(&po->pg_vec_lock);
  83. ret = pdiag_put_ring(&po->rx_ring, po->tp_version,
  84. PACKET_DIAG_RX_RING, skb);
  85. if (!ret)
  86. ret = pdiag_put_ring(&po->tx_ring, po->tp_version,
  87. PACKET_DIAG_TX_RING, skb);
  88. mutex_unlock(&po->pg_vec_lock);
  89. return ret;
  90. }
  91. static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
  92. {
  93. int ret = 0;
  94. mutex_lock(&fanout_mutex);
  95. if (po->fanout) {
  96. u32 val;
  97. val = (u32)po->fanout->id | ((u32)po->fanout->type << 16);
  98. ret = nla_put_u32(nlskb, PACKET_DIAG_FANOUT, val);
  99. }
  100. mutex_unlock(&fanout_mutex);
  101. return ret;
  102. }
/*
 * Build one complete diag response message for socket @sk into @skb.
 *
 * Fills the fixed packet_diag_msg header, then appends optional attributes
 * selected by req->pdiag_show bits. Attribute order here is the order they
 * appear on the wire. On any overflow the whole half-built message is
 * cancelled and -EMSGSIZE returned so the dumper can retry in a fresh skb.
 *
 * @may_report_filterinfo: caller-checked CAP_NET_ADMIN; gates BPF disclosure.
 * @user_ns: namespace used to translate the owner uid for the requester.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			struct packet_diag_req *req,
			bool may_report_filterinfo,
			struct user_namespace *user_ns,
			u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct nlmsghdr *nlh;
	struct packet_diag_msg *rp;
	struct packet_sock *po = pkt_sk(sk);

	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rp), flags);
	if (!nlh)
		return -EMSGSIZE;

	/* Fixed header: family, socket type, bound proto, inode, cookie. */
	rp = nlmsg_data(nlh);
	rp->pdiag_family = AF_PACKET;
	rp->pdiag_type = sk->sk_type;
	rp->pdiag_num = ntohs(po->num); /* po->num is stored big-endian */
	rp->pdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, rp->pdiag_cookie);

	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    pdiag_put_info(po, skb))
		goto out_nlmsg_trim;

	/* UID rides on the INFO show-flag rather than a flag of its own. */
	if ((req->pdiag_show & PACKET_SHOW_INFO) &&
	    nla_put_u32(skb, PACKET_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MCLIST) &&
	    pdiag_put_mclist(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_RING_CFG) &&
	    pdiag_put_rings_cfg(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FANOUT) &&
	    pdiag_put_fanout(po, skb))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, skb, PACKET_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
	    sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
				     PACKET_DIAG_FILTER))
		goto out_nlmsg_trim;

	nlmsg_end(skb, nlh);
	return 0;

out_nlmsg_trim:
	/* Roll the skb back to before nlmsg_put(); nothing partial leaks out. */
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/*
 * Netlink dump callback: walk every AF_PACKET socket in the requester's
 * net namespace and emit one sk_diag_fill() message per socket.
 *
 * cb->args[0] is the resume cursor: the count of sockets already emitted
 * in previous passes. Sockets with index < s_num are skipped; when the
 * skb fills up we stop, record the new cursor, and the dump resumes here
 * on the next recvmsg(). Returning skb->len (> 0) means "more to come".
 */
static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int num = 0, s_num = cb->args[0];
	struct packet_diag_req *req;
	struct net *net;
	struct sock *sk;
	bool may_report_filterinfo;

	net = sock_net(skb->sk);
	req = nlmsg_data(cb->nlh);
	/* Only CAP_NET_ADMIN requesters may see attached BPF filters. */
	may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);

	mutex_lock(&net->packet.sklist_lock);
	sk_for_each(sk, &net->packet.sklist) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num < s_num)
			goto next;

		/* Negative return: skb is full; stop without bumping num so
		 * this socket is retried on the next dump pass. */
		if (sk_diag_fill(sk, skb, req,
				 may_report_filterinfo,
				 sk_user_ns(NETLINK_CB(cb->skb).sk),
				 NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, NLM_F_MULTI,
				 sock_i_ino(sk)) < 0)
			goto done;
next:
		num++;
	}
done:
	mutex_unlock(&net->packet.sklist_lock);
	cb->args[0] = num;

	return skb->len;
}
  181. static int packet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
  182. {
  183. int hdrlen = sizeof(struct packet_diag_req);
  184. struct net *net = sock_net(skb->sk);
  185. struct packet_diag_req *req;
  186. if (nlmsg_len(h) < hdrlen)
  187. return -EINVAL;
  188. req = nlmsg_data(h);
  189. /* Make it possible to support protocol filtering later */
  190. if (req->sdiag_protocol)
  191. return -EINVAL;
  192. if (h->nlmsg_flags & NLM_F_DUMP) {
  193. struct netlink_dump_control c = {
  194. .dump = packet_diag_dump,
  195. };
  196. return netlink_dump_start(net->diag_nlsk, skb, h, &c);
  197. } else
  198. return -EOPNOTSUPP;
  199. }
/* Registration record: routes AF_PACKET sock_diag requests to our dump handler. */
static const struct sock_diag_handler packet_diag_handler = {
	.family = AF_PACKET,
	.dump = packet_diag_handler_dump,
};
/* Module load: register the AF_PACKET handler with the sock_diag core. */
static int __init packet_diag_init(void)
{
	return sock_diag_register(&packet_diag_handler);
}
/* Module unload: drop the handler so requests no longer reach this code. */
static void __exit packet_diag_exit(void)
{
	sock_diag_unregister(&packet_diag_handler);
}
module_init(packet_diag_init);
module_exit(packet_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload on sock_diag queries for family 17 (AF_PACKET's numeric value). */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 17 /* AF_PACKET */);