/*
 * raw.c - Raw sockets for protocol family CAN
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/raw.h>
#include <net/sock.h>
#include <net/net_namespace.h>

#define CAN_RAW_VERSION CAN_VERSION

MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
MODULE_ALIAS("can-proto-1");

#define MASK_ALL 0

/*
 * A raw socket has a list of can_filters attached to it, each receiving
 * the CAN frames matching that filter. If the filter list is empty,
 * no CAN frames will be received by the socket. The default after
 * opening the socket is to have one filter which receives all frames.
 * The filter list is allocated dynamically with the exception of the
 * list containing only one item. This common case is optimized by
 * storing the single filter in dfilter, to avoid using dynamic memory.
 */

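/*
 * Illustrative user-space sketch (not part of this module): filters are
 * typically installed on a CAN_RAW socket via setsockopt() before or after
 * bind(), e.g. (the IDs and masks below are made-up example values):
 *
 *	struct can_filter rfilter[2] = {
 *		{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },
 *		{ .can_id = 0x200, .can_mask = 0x700 },
 *	};
 *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *	setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, &rfilter, sizeof(rfilter));
 *
 * Passing a zero-length option value removes all filters, which matches
 * the "empty filter list receives nothing" behaviour described above.
 */
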
struct uniqframe {
	int skbcnt;
	const struct sk_buff *skb;
	unsigned int join_rx_count;
};

struct raw_sock {
	struct sock sk;
	int bound;
	int ifindex;
	struct notifier_block notifier;
	int loopback;
	int recv_own_msgs;
	int fd_frames;
	int join_filters;
	int count;                 /* number of active filters */
	struct can_filter dfilter; /* default/single filter */
	struct can_filter *filter; /* pointer to filter(s) */
	can_err_mask_t err_mask;
	struct uniqframe __percpu *uniq;
};

/*
 * Return pointer to store the extra msg flags for raw_recvmsg().
 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
 * in skb->cb.
 */
static inline unsigned int *raw_flags(struct sk_buff *skb)
{
	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
			       sizeof(unsigned int));

	/* return pointer after struct sockaddr_can */
	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
}

static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}

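/*
 * raw_rcv() - per-filter receive callback registered via can_rx_register().
 * It drops own transmissions and CAN FD frames according to the socket
 * options, uses the per-CPU uniqframe state to deliver a frame matching
 * several filters only once (or, with join_filters, only when all filters
 * have matched), and queues a clone of the skb with the originating
 * interface and message flags stored in skb->cb for raw_recvmsg().
 */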
static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* do not pass non-CAN2.0 frames to a legacy socket */
	if (!ro->fd_frames && oskb->len != CAN_MTU)
		return;

	/* eliminate multiple filter matches for the same skb */
	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
		if (ro->join_filters) {
			this_cpu_inc(ro->uniq->join_rx_count);
			/* drop frame until all enabled filters matched */
			if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
				return;
		} else {
			return;
		}
	} else {
		this_cpu_ptr(ro->uniq)->skb = oskb;
		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
		/* drop first frame to check all enabled filters? */
		if (ro->join_filters && ro->count > 1)
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/*
	 * Put the datagram to the queue so that raw_recvmsg() can
	 * get it from there. We need to pass the interface index to
	 * raw_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
	 * containing the interface index.
	 */
	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}

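/*
 * The raw_{enable,disable}_* helpers below (un)register the socket's
 * can_filter list and error mask with the af_can receive lists for a given
 * device (or for all CAN devices when dev is NULL). raw_enable_filters()
 * rolls back the already registered filters if one registration fails.
 */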
static int raw_enable_filters(struct net *net, struct net_device *dev,
			      struct sock *sk, struct can_filter *filter,
			      int count)
{
	int err = 0;
	int i;

	for (i = 0; i < count; i++) {
		err = can_rx_register(net, dev, filter[i].can_id,
				      filter[i].can_mask,
				      raw_rcv, sk, "raw", sk);
		if (err) {
			/* clean up successfully registered filters */
			while (--i >= 0)
				can_rx_unregister(net, dev, filter[i].can_id,
						  filter[i].can_mask,
						  raw_rcv, sk);
			break;
		}
	}

	return err;
}

static int raw_enable_errfilter(struct net *net, struct net_device *dev,
				struct sock *sk, can_err_mask_t err_mask)
{
	int err = 0;

	if (err_mask)
		err = can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
				      raw_rcv, sk, "raw", sk);

	return err;
}

static void raw_disable_filters(struct net *net, struct net_device *dev,
				struct sock *sk, struct can_filter *filter,
				int count)
{
	int i;

	for (i = 0; i < count; i++)
		can_rx_unregister(net, dev, filter[i].can_id,
				  filter[i].can_mask, raw_rcv, sk);
}

static inline void raw_disable_errfilter(struct net *net,
					 struct net_device *dev,
					 struct sock *sk,
					 can_err_mask_t err_mask)
{
	if (err_mask)
		can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
				  raw_rcv, sk);
}

static inline void raw_disable_allfilters(struct net *net,
					  struct net_device *dev,
					  struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
	raw_disable_errfilter(net, dev, sk, ro->err_mask);
}

static int raw_enable_allfilters(struct net *net, struct net_device *dev,
				 struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);
	int err;

	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
	if (!err) {
		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
		if (err)
			raw_disable_filters(net, dev, sk, ro->filter,
					    ro->count);
	}

	return err;
}

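/*
 * raw_notifier() - netdevice notifier callback for the bound CAN device.
 * On NETDEV_UNREGISTER the filters are removed and the socket is unbound
 * with sk_err = ENODEV; on NETDEV_DOWN only an ENETDOWN error is reported
 * to the socket owner.
 */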
static int raw_notifier(struct notifier_block *nb,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
	struct sock *sk = &ro->sk;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		return NOTIFY_DONE;

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	if (ro->ifindex != dev->ifindex)
		return NOTIFY_DONE;

	switch (msg) {
	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound)
			raw_disable_allfilters(dev_net(dev), dev, sk);

		if (ro->count > 1)
			kfree(ro->filter);

		ro->ifindex = 0;
		ro->bound = 0;
		ro->count = 0;
		release_sock(sk);

		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
		break;
	}

	return NOTIFY_DONE;
}

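/*
 * raw_init() - socket initialization: one "receive everything" default
 * filter, loopback enabled, own messages / CAN FD frames / join filters
 * disabled, per-CPU uniqframe state allocated and the netdevice notifier
 * registered.
 */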
static int raw_init(struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	ro->bound            = 0;
	ro->ifindex          = 0;

	/* set default filter to single entry dfilter */
	ro->dfilter.can_id   = 0;
	ro->dfilter.can_mask = MASK_ALL;
	ro->filter           = &ro->dfilter;
	ro->count            = 1;

	/* set default loopback behaviour */
	ro->loopback         = 1;
	ro->recv_own_msgs    = 0;
	ro->fd_frames        = 0;
	ro->join_filters     = 0;

	/* alloc_percpu provides zero'ed memory */
	ro->uniq = alloc_percpu(struct uniqframe);
	if (unlikely(!ro->uniq))
		return -ENOMEM;

	/* set notifier */
	ro->notifier.notifier_call = raw_notifier;

	register_netdevice_notifier(&ro->notifier);

	return 0;
}

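/*
 * raw_release() - tear down the socket: unregister the notifier, remove all
 * filter registrations from the bound device (or from all devices when the
 * socket was bound with ifindex 0), and free a dynamically allocated filter
 * list as well as the per-CPU uniqframe state.
 */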
static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	unregister_netdevice_notifier(&ro->notifier);

	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (dev) {
				raw_disable_allfilters(dev_net(dev), dev, sk);
				dev_put(dev);
			}
		} else
			raw_disable_allfilters(sock_net(sk), NULL, sk);
	}

	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->count = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}

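/*
 * raw_bind() - bind the socket to one CAN interface, or to all CAN
 * interfaces when can_ifindex is 0. The current filter set is registered
 * on the new device first and only then removed from the old one, so a
 * failing bind leaves the previous binding intact.
 */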
static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < sizeof(*addr))
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;

	lock_sock(sk);

	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			err = -ENODEV;
			goto out;
		}
		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), dev, sk);
		dev_put(dev);
	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->ifindex) {
				struct net_device *dev;

				dev = dev_get_by_index(sock_net(sk),
						       ro->ifindex);
				if (dev) {
					raw_disable_allfilters(dev_net(dev),
							       dev, sk);
					dev_put(dev);
				}
			} else
				raw_disable_allfilters(sock_net(sk), NULL, sk);
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
	}

 out:
	release_sock(sk);

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

	return err;
}

static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
		       int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);

	if (peer)
		return -EOPNOTSUPP;

	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = ro->ifindex;

	return sizeof(*addr);
}

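/*
 * raw_setsockopt() - handle the SOL_CAN_RAW socket options.
 * CAN_RAW_FILTER and CAN_RAW_ERR_FILTER swap the receive list registrations
 * while the socket is bound (the new registrations are made first, then the
 * old ones are removed); the remaining options are plain integer flags
 * copied from user space.
 */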
static int raw_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL; /* dyn. alloc'ed filters */
	struct can_filter sfilter;        /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_user(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_user(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		lock_sock(sk);

		if (ro->bound && ro->ifindex)
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 &sfilter, 1);
			else
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 filter, count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
					    ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count = count;

 out_fil:
		if (dev)
			dev_put(dev);

		release_sock(sk);

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_user(&err_mask, optval, optlen))
			return -EFAULT;

		err_mask &= CAN_ERR_MASK;

		lock_sock(sk);

		if (ro->bound && ro->ifindex)
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(sock_net(sk), dev, sk,
						   err_mask);

			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(sock_net(sk), dev, sk,
					      ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

 out_err:
		if (dev)
			dev_put(dev);

		release_sock(sk);

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_user(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_FD_FRAMES:
		if (optlen != sizeof(ro->fd_frames))
			return -EINVAL;

		if (copy_from_user(&ro->fd_frames, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_JOIN_FILTERS:
		if (optlen != sizeof(ro->join_filters))
			return -EINVAL;

		if (copy_from_user(&ro->join_filters, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}

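/*
 * raw_getsockopt() - return the current SOL_CAN_RAW option values.
 * CAN_RAW_FILTER copies out at most the stored filter list and reports the
 * copied length; the other options are single integers resp. the error mask.
 */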
static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int len;
	void *val;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);

			if (len > fsize)
				len = fsize;
			if (copy_to_user(optval, ro->filter, len))
				err = -EFAULT;
		} else
			len = 0;
		release_sock(sk);

		if (!err)
			err = put_user(len, optlen);
		return err;

	case CAN_RAW_ERR_FILTER:
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		val = &ro->err_mask;
		break;

	case CAN_RAW_LOOPBACK:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->loopback;
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->recv_own_msgs;
		break;

	case CAN_RAW_FD_FRAMES:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->fd_frames;
		break;

	case CAN_RAW_JOIN_FILTERS:
		if (len > sizeof(int))
			len = sizeof(int);
		val = &ro->join_filters;
		break;

	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, val, len))
		return -EFAULT;
	return 0;
}

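/*
 * raw_sendmsg() - send one frame. The destination interface is taken from
 * msg_name if given, otherwise from the bound ifindex. The payload must be
 * exactly CAN_MTU bytes, or CANFD_MTU bytes when CAN_RAW_FD_FRAMES is
 * enabled and the device is CAN FD capable.
 */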
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err;

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < sizeof(*addr))
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else
		ifindex = ro->ifindex;

	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev)
		return -ENXIO;

	err = -EINVAL;
	if (ro->fd_frames && dev->mtu == CANFD_MTU) {
		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
			goto put_dev;
	} else {
		if (unlikely(size != CAN_MTU))
			goto put_dev;
	}

	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto put_dev;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	err = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (err < 0)
		goto free_skb;

	sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags);

	skb->dev = dev;
	skb->sk = sk;
	skb->priority = sk->sk_priority;

	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}

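/*
 * raw_recvmsg() - dequeue one frame from the socket receive queue, copy it
 * (possibly truncated) to user space and report the originating interface
 * via msg_name together with the MSG_DONTROUTE/MSG_CONFIRM flags recorded
 * in raw_rcv().
 */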
static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int err = 0;
	int noblock;

	noblock = flags & MSG_DONTWAIT;
	flags &= ~MSG_DONTWAIT;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	err = memcpy_to_msg(msg, skb->data, size);
	if (err < 0) {
		skb_free_datagram(sk, skb);
		return err;
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(sizeof(struct sockaddr_can));
		msg->msg_namelen = sizeof(struct sockaddr_can);
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in raw_rcv() */
	msg->msg_flags |= *(raw_flags(skb));

	skb_free_datagram(sk, skb);

	return size;
}

static const struct proto_ops raw_ops = {
	.family        = PF_CAN,
	.release       = raw_release,
	.bind          = raw_bind,
	.connect       = sock_no_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = raw_getname,
	.poll          = datagram_poll,
	.ioctl         = can_ioctl,	/* use can_ioctl() from af_can.c */
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.setsockopt    = raw_setsockopt,
	.getsockopt    = raw_getsockopt,
	.sendmsg       = raw_sendmsg,
	.recvmsg       = raw_recvmsg,
	.mmap          = sock_no_mmap,
	.sendpage      = sock_no_sendpage,
};

static struct proto raw_proto __read_mostly = {
	.name       = "CAN_RAW",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct raw_sock),
	.init       = raw_init,
};

static const struct can_proto raw_can_proto = {
	.type       = SOCK_RAW,
	.protocol   = CAN_RAW,
	.ops        = &raw_ops,
	.prot       = &raw_proto,
};

static __init int raw_module_init(void)
{
	int err;

	pr_info("can: raw protocol (rev " CAN_RAW_VERSION ")\n");

	err = can_proto_register(&raw_can_proto);
	if (err < 0)
		printk(KERN_ERR "can: registration of raw protocol failed\n");

	return err;
}

static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
}

module_init(raw_module_init);
module_exit(raw_module_exit);