qrtr.c

/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/qrtr.h>
#include <linux/termios.h>	/* For TIOCINQ/OUTQ */

#include <net/sock.h>

#include "qrtr.h"

#define QRTR_PROTO_VER_1 1
#define QRTR_PROTO_VER_2 3

/* auto-bind range */
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff
/**
 * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @src_node_id: source node
 * @src_port_id: source port
 * @confirm_rx: boolean; whether a resume-tx packet should be sent in reply
 * @size: length of packet, excluding this header
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr_v1 {
	__le32 version;
	__le32 type;
	__le32 src_node_id;
	__le32 src_port_id;
	__le32 confirm_rx;
	__le32 size;
	__le32 dst_node_id;
	__le32 dst_port_id;
} __packed;
/**
 * struct qrtr_hdr_v2 - (I|R)PCrouter packet header for later versions
 * @version: protocol version
 * @type: packet type; one of QRTR_TYPE_*
 * @flags: bitmask of QRTR_FLAGS_*
 * @optlen: length of optional header data
 * @size: length of packet, excluding this header and optlen
 * @src_node_id: source node
 * @src_port_id: source port
 * @dst_node_id: destination node
 * @dst_port_id: destination port
 */
struct qrtr_hdr_v2 {
	u8 version;
	u8 type;
	u8 flags;
	u8 optlen;
	__le32 size;
	__le16 src_node_id;
	__le16 src_port_id;
	__le16 dst_node_id;
	__le16 dst_port_id;
};

#define QRTR_FLAGS_CONFIRM_RX	BIT(0)
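
/*
 * Layout notes (derived from the structs above): a v1 header is eight
 * little-endian 32-bit words (32 bytes), while a v2 header is 16 bytes
 * optionally followed by @optlen bytes of option data. In both versions
 * the payload that follows is padded to a multiple of four bytes.
 */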

struct qrtr_cb {
	u32 src_node;
	u32 src_port;
	u32 dst_node;
	u32 dst_port;
	u8 type;
	u8 confirm_rx;
};

#define QRTR_HDR_MAX_SIZE max_t(size_t, sizeof(struct qrtr_hdr_v1), \
					sizeof(struct qrtr_hdr_v2))

struct qrtr_sock {
	/* WARNING: sk must be the first member */
	struct sock sk;
	struct sockaddr_qrtr us;
	struct sockaddr_qrtr peer;
};

static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct qrtr_sock, sk) != 0);
	return container_of(sk, struct qrtr_sock, sk);
}

static unsigned int qrtr_local_nid = -1;

/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
/* broadcast list */
static LIST_HEAD(qrtr_all_nodes);
/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
static DEFINE_MUTEX(qrtr_node_lock);

/* local port allocation management */
static DEFINE_IDR(qrtr_ports);
static DEFINE_MUTEX(qrtr_port_lock);

/**
 * struct qrtr_node - endpoint node
 * @ep_lock: lock for endpoint management and callbacks
 * @ep: endpoint
 * @ref: reference count for node
 * @nid: node id
 * @rx_queue: receive queue
 * @work: scheduled work struct for recv work
 * @item: list item for broadcast list
 */
struct qrtr_node {
	struct mutex ep_lock;
	struct qrtr_endpoint *ep;
	struct kref ref;
	unsigned int nid;
	struct sk_buff_head rx_queue;
	struct work_struct work;
	struct list_head item;
};

static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to);
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to);

/* Release node resources and free the node.
 *
 * Do not call directly, use qrtr_node_release. To be used with
 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
 */
static void __qrtr_node_release(struct kref *kref)
{
	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);

	if (node->nid != QRTR_EP_NID_AUTO)
		radix_tree_delete(&qrtr_nodes, node->nid);

	list_del(&node->item);
	mutex_unlock(&qrtr_node_lock);

	cancel_work_sync(&node->work);
	skb_queue_purge(&node->rx_queue);
	kfree(node);
}

/* Increment reference to node. */
static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
{
	if (node)
		kref_get(&node->ref);
	return node;
}

/* Decrement reference to node and release as necessary. */
static void qrtr_node_release(struct qrtr_node *node)
{
	if (!node)
		return;
	kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
}

/* Pass an outgoing packet socket buffer to the endpoint driver. */
static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			     int type, struct sockaddr_qrtr *from,
			     struct sockaddr_qrtr *to)
{
	struct qrtr_hdr_v1 *hdr;
	size_t len = skb->len;
	int rc = -ENODEV;

	hdr = skb_push(skb, sizeof(*hdr));
	hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
	hdr->type = cpu_to_le32(type);
	hdr->src_node_id = cpu_to_le32(from->sq_node);
	hdr->src_port_id = cpu_to_le32(from->sq_port);
	if (to->sq_port == QRTR_PORT_CTRL) {
		hdr->dst_node_id = cpu_to_le32(node->nid);
		hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
	} else {
		hdr->dst_node_id = cpu_to_le32(to->sq_node);
		hdr->dst_port_id = cpu_to_le32(to->sq_port);
	}

	hdr->size = cpu_to_le32(len);
	hdr->confirm_rx = 0;

	skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));

	mutex_lock(&node->ep_lock);
	if (node->ep)
		rc = node->ep->xmit(node->ep, skb);
	else
		kfree_skb(skb);
	mutex_unlock(&node->ep_lock);

	return rc;
}
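
/*
 * Callers of qrtr_node_enqueue() are expected to have reserved
 * QRTR_HDR_MAX_SIZE bytes of headroom in the skb (as qrtr_alloc_ctrl_packet()
 * and qrtr_sendmsg() do), so the skb_push() above always has room for the
 * version 1 header.
 */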

/* Lookup node by id.
 *
 * callers must release with qrtr_node_release()
 */
static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
{
	struct qrtr_node *node;

	mutex_lock(&qrtr_node_lock);
	node = radix_tree_lookup(&qrtr_nodes, nid);
	node = qrtr_node_acquire(node);
	mutex_unlock(&qrtr_node_lock);

	return node;
}

/* Assign node id to node.
 *
 * This is mostly useful for automatic node id assignment, based on
 * the source id in the incoming packet.
 */
static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
{
	if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
		return;

	mutex_lock(&qrtr_node_lock);
	radix_tree_insert(&qrtr_nodes, nid, node);
	node->nid = nid;
	mutex_unlock(&qrtr_node_lock);
}

/**
 * qrtr_endpoint_post() - post incoming data
 * @ep: endpoint handle
 * @data: data pointer
 * @len: size of data in bytes
 *
 * Return: 0 on success; negative error code on failure
 */
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
{
	struct qrtr_node *node = ep->node;
	const struct qrtr_hdr_v1 *v1;
	const struct qrtr_hdr_v2 *v2;
	struct sk_buff *skb;
	struct qrtr_cb *cb;
	unsigned int size;
	unsigned int ver;
	size_t hdrlen;

	if (len & 3)
		return -EINVAL;

	skb = netdev_alloc_skb(NULL, len);
	if (!skb)
		return -ENOMEM;

	cb = (struct qrtr_cb *)skb->cb;

	/* Version field in v1 is little endian, so this works for both cases */
	ver = *(u8 *)data;

	switch (ver) {
	case QRTR_PROTO_VER_1:
		v1 = data;
		hdrlen = sizeof(*v1);

		cb->type = le32_to_cpu(v1->type);
		cb->src_node = le32_to_cpu(v1->src_node_id);
		cb->src_port = le32_to_cpu(v1->src_port_id);
		cb->confirm_rx = !!v1->confirm_rx;
		cb->dst_node = le32_to_cpu(v1->dst_node_id);
		cb->dst_port = le32_to_cpu(v1->dst_port_id);

		size = le32_to_cpu(v1->size);
		break;
	case QRTR_PROTO_VER_2:
		v2 = data;
		hdrlen = sizeof(*v2) + v2->optlen;

		cb->type = v2->type;
		cb->confirm_rx = !!(v2->flags & QRTR_FLAGS_CONFIRM_RX);
		cb->src_node = le16_to_cpu(v2->src_node_id);
		cb->src_port = le16_to_cpu(v2->src_port_id);
		cb->dst_node = le16_to_cpu(v2->dst_node_id);
		cb->dst_port = le16_to_cpu(v2->dst_port_id);

		if (cb->src_port == (u16)QRTR_PORT_CTRL)
			cb->src_port = QRTR_PORT_CTRL;
		if (cb->dst_port == (u16)QRTR_PORT_CTRL)
			cb->dst_port = QRTR_PORT_CTRL;

		size = le32_to_cpu(v2->size);
		break;
	default:
		pr_err("qrtr: Invalid version %d\n", ver);
		goto err;
	}

	if (len != ALIGN(size, 4) + hdrlen)
		goto err;

	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA)
		goto err;

	skb_put_data(skb, data + hdrlen, size);

	skb_queue_tail(&node->rx_queue, skb);
	schedule_work(&node->work);

	return 0;

err:
	kfree_skb(skb);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_post);
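
/*
 * Usage sketch (illustration only, not part of the original file): a
 * transport driver that has registered an endpoint hands every received
 * frame to the router roughly like this, where "qdev" stands in for the
 * driver's own device structure:
 *
 *	rc = qrtr_endpoint_post(&qdev->ep, buf, len);
 *	if (rc)
 *		dev_err(qdev->dev, "invalid ipcrouter packet\n");
 */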

/**
 * qrtr_alloc_ctrl_packet() - allocate control packet skb
 * @pkt: reference to qrtr_ctrl_pkt pointer
 *
 * Returns newly allocated sk_buff, or NULL on failure
 *
 * This function allocates a sk_buff large enough to carry a qrtr_ctrl_pkt and
 * on success returns a reference to the control packet in @pkt.
 */
static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt)
{
	const int pkt_len = sizeof(struct qrtr_ctrl_pkt);
	struct sk_buff *skb;

	skb = alloc_skb(QRTR_HDR_MAX_SIZE + pkt_len, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_reserve(skb, QRTR_HDR_MAX_SIZE);
	*pkt = skb_put_zero(skb, pkt_len);

	return skb;
}

static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);

/* Handle and route a received packet.
 *
 * This will auto-reply with resume-tx packet as necessary.
 */
static void qrtr_node_rx_work(struct work_struct *work)
{
	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
	struct qrtr_ctrl_pkt *pkt;
	struct sockaddr_qrtr dst;
	struct sockaddr_qrtr src;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
		struct qrtr_sock *ipc;
		struct qrtr_cb *cb;
		int confirm;

		cb = (struct qrtr_cb *)skb->cb;
		src.sq_node = cb->src_node;
		src.sq_port = cb->src_port;
		dst.sq_node = cb->dst_node;
		dst.sq_port = cb->dst_port;
		confirm = !!cb->confirm_rx;

		qrtr_node_assign(node, cb->src_node);

		ipc = qrtr_port_lookup(cb->dst_port);
		if (!ipc) {
			kfree_skb(skb);
		} else {
			if (sock_queue_rcv_skb(&ipc->sk, skb))
				kfree_skb(skb);

			qrtr_port_put(ipc);
		}

		if (confirm) {
			skb = qrtr_alloc_ctrl_packet(&pkt);
			if (!skb)
				break;

			pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
			pkt->client.node = cpu_to_le32(dst.sq_node);
			pkt->client.port = cpu_to_le32(dst.sq_port);

			if (qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX,
					      &dst, &src))
				break;
		}
	}
}

/**
 * qrtr_endpoint_register() - register a new endpoint
 * @ep: endpoint to register
 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
 * Return: 0 on success; negative error code on failure
 *
 * The specified endpoint must have the xmit function pointer set on call.
 */
int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
{
	struct qrtr_node *node;

	if (!ep || !ep->xmit)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	INIT_WORK(&node->work, qrtr_node_rx_work);
	kref_init(&node->ref);
	mutex_init(&node->ep_lock);
	skb_queue_head_init(&node->rx_queue);
	node->nid = QRTR_EP_NID_AUTO;
	node->ep = ep;

	qrtr_node_assign(node, nid);

	mutex_lock(&qrtr_node_lock);
	list_add(&node->item, &qrtr_all_nodes);
	mutex_unlock(&qrtr_node_lock);
	ep->node = node;

	return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
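
/*
 * Usage sketch (illustration only): a transport driver typically embeds a
 * struct qrtr_endpoint, points its xmit member at the driver's transmit
 * routine (a hypothetical qdev_xmit below) and registers with an automatic
 * node id:
 *
 *	qdev->ep.xmit = qdev_xmit;
 *	ret = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
 *
 * The actual node id is then learned from the first packet posted via
 * qrtr_endpoint_post(), see qrtr_node_assign().
 */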

/**
 * qrtr_endpoint_unregister - unregister endpoint
 * @ep: endpoint to unregister
 */
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
{
	struct qrtr_node *node = ep->node;
	struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
	struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
	struct qrtr_ctrl_pkt *pkt;
	struct sk_buff *skb;

	mutex_lock(&node->ep_lock);
	node->ep = NULL;
	mutex_unlock(&node->ep_lock);

	/* Notify the local controller about the event */
	skb = qrtr_alloc_ctrl_packet(&pkt);
	if (skb) {
		pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
		qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
	}

	qrtr_node_release(node);
	ep->node = NULL;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_unregister);

/* Lookup socket by port.
 *
 * Callers must release with qrtr_port_put()
 */
static struct qrtr_sock *qrtr_port_lookup(int port)
{
	struct qrtr_sock *ipc;

	if (port == QRTR_PORT_CTRL)
		port = 0;

	mutex_lock(&qrtr_port_lock);
	ipc = idr_find(&qrtr_ports, port);
	if (ipc)
		sock_hold(&ipc->sk);
	mutex_unlock(&qrtr_port_lock);

	return ipc;
}

/* Release acquired socket. */
static void qrtr_port_put(struct qrtr_sock *ipc)
{
	sock_put(&ipc->sk);
}

/* Remove port assignment. */
static void qrtr_port_remove(struct qrtr_sock *ipc)
{
	struct qrtr_ctrl_pkt *pkt;
	struct sk_buff *skb;
	int port = ipc->us.sq_port;
	struct sockaddr_qrtr to;

	to.sq_family = AF_QIPCRTR;
	to.sq_node = QRTR_NODE_BCAST;
	to.sq_port = QRTR_PORT_CTRL;

	skb = qrtr_alloc_ctrl_packet(&pkt);
	if (skb) {
		pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
		pkt->client.node = cpu_to_le32(ipc->us.sq_node);
		pkt->client.port = cpu_to_le32(ipc->us.sq_port);

		skb_set_owner_w(skb, &ipc->sk);
		qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us,
				   &to);
	}

	if (port == QRTR_PORT_CTRL)
		port = 0;

	__sock_put(&ipc->sk);

	mutex_lock(&qrtr_port_lock);
	idr_remove(&qrtr_ports, port);
	mutex_unlock(&qrtr_port_lock);
}

/* Assign port number to socket.
 *
 * Specify port in the integer pointed to by port, and it will be adjusted
 * on return as necessary.
 *
 * Port may be:
 *   0: Assign ephemeral port in [QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET]
 *   <QRTR_MIN_EPH_SOCKET: Specified; requires CAP_NET_ADMIN
 *   >QRTR_MIN_EPH_SOCKET: Specified; available to all
 */
static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
{
	int rc;

	mutex_lock(&qrtr_port_lock);
	if (!*port) {
		rc = idr_alloc(&qrtr_ports, ipc,
			       QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
			       GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
		rc = -EACCES;
	} else if (*port == QRTR_PORT_CTRL) {
		rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
	} else {
		rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
		if (rc >= 0)
			*port = rc;
	}
	mutex_unlock(&qrtr_port_lock);

	if (rc == -ENOSPC)
		return -EADDRINUSE;
	else if (rc < 0)
		return rc;

	sock_hold(&ipc->sk);

	return 0;
}
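
/*
 * Note on the idr mapping: the control port QRTR_PORT_CTRL does not fit the
 * idr key space, so it is stored at index 0 here; qrtr_port_lookup() and
 * qrtr_port_remove() apply the same translation.
 */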

/* Reset all non-control ports */
static void qrtr_reset_ports(void)
{
	struct qrtr_sock *ipc;
	int id;

	mutex_lock(&qrtr_port_lock);
	idr_for_each_entry(&qrtr_ports, ipc, id) {
		/* Don't reset control port */
		if (id == 0)
			continue;

		sock_hold(&ipc->sk);
		ipc->sk.sk_err = ENETRESET;
		ipc->sk.sk_error_report(&ipc->sk);
		sock_put(&ipc->sk);
	}
	mutex_unlock(&qrtr_port_lock);
}

/* Bind socket to address.
 *
 * Socket should be locked upon call.
 */
static int __qrtr_bind(struct socket *sock,
		       const struct sockaddr_qrtr *addr, int zapped)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int port;
	int rc;

	/* rebinding ok */
	if (!zapped && addr->sq_port == ipc->us.sq_port)
		return 0;

	port = addr->sq_port;
	rc = qrtr_port_assign(ipc, &port);
	if (rc)
		return rc;

	/* unbind previous, if any */
	if (!zapped)
		qrtr_port_remove(ipc);
	ipc->us.sq_port = port;

	sock_reset_flag(sk, SOCK_ZAPPED);

	/* Notify all open ports about the new controller */
	if (port == QRTR_PORT_CTRL)
		qrtr_reset_ports();

	return 0;
}

/* Auto bind to an ephemeral port. */
static int qrtr_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr addr;

	if (!sock_flag(sk, SOCK_ZAPPED))
		return 0;

	addr.sq_family = AF_QIPCRTR;
	addr.sq_node = qrtr_local_nid;
	addr.sq_port = 0;

	return __qrtr_bind(sock, &addr, 1);
}

/* Bind socket to specified sockaddr. */
static int qrtr_bind(struct socket *sock, struct sockaddr *saddr, int len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	if (addr->sq_node != ipc->us.sq_node)
		return -EINVAL;

	lock_sock(sk);
	rc = __qrtr_bind(sock, addr, sock_flag(sk, SOCK_ZAPPED));
	release_sock(sk);

	return rc;
}

/* Queue packet to local peer socket. */
static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to)
{
	struct qrtr_sock *ipc;
	struct qrtr_cb *cb;

	ipc = qrtr_port_lookup(to->sq_port);
	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
		kfree_skb(skb);
		return -ENODEV;
	}

	cb = (struct qrtr_cb *)skb->cb;
	cb->src_node = from->sq_node;
	cb->src_port = from->sq_port;

	if (sock_queue_rcv_skb(&ipc->sk, skb)) {
		qrtr_port_put(ipc);
		kfree_skb(skb);
		return -ENOSPC;
	}

	qrtr_port_put(ipc);

	return 0;
}

/* Queue packet for broadcast. */
static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
			      int type, struct sockaddr_qrtr *from,
			      struct sockaddr_qrtr *to)
{
	struct sk_buff *skbn;

	mutex_lock(&qrtr_node_lock);
	list_for_each_entry(node, &qrtr_all_nodes, item) {
		skbn = skb_clone(skb, GFP_KERNEL);
		if (!skbn)
			break;
		skb_set_owner_w(skbn, skb->sk);
		qrtr_node_enqueue(node, skbn, type, from, to);
	}
	mutex_unlock(&qrtr_node_lock);

	qrtr_local_enqueue(node, skb, type, from, to);

	return 0;
}
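
/*
 * Broadcast semantics: qrtr_bcast_enqueue() clones the skb once per
 * registered endpoint and finally hands the original skb to
 * qrtr_local_enqueue() for delivery to a matching local port.
 */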

static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
			  struct sockaddr_qrtr *, struct sockaddr_qrtr *);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct qrtr_node *node;
	struct sk_buff *skb;
	size_t plen;
	u32 type = QRTR_TYPE_DATA;
	int rc;

	if (msg->msg_flags & ~(MSG_DONTWAIT))
		return -EINVAL;

	if (len > 65535)
		return -EMSGSIZE;

	lock_sock(sk);

	if (addr) {
		if (msg->msg_namelen < sizeof(*addr)) {
			release_sock(sk);
			return -EINVAL;
		}

		if (addr->sq_family != AF_QIPCRTR) {
			release_sock(sk);
			return -EINVAL;
		}

		rc = qrtr_autobind(sock);
		if (rc) {
			release_sock(sk);
			return rc;
		}
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		addr = &ipc->peer;
	} else {
		release_sock(sk);
		return -ENOTCONN;
	}

	node = NULL;
	if (addr->sq_node == QRTR_NODE_BCAST) {
		enqueue_fn = qrtr_bcast_enqueue;
		if (addr->sq_port != QRTR_PORT_CTRL) {
			release_sock(sk);
			return -ENOTCONN;
		}
	} else if (addr->sq_node == ipc->us.sq_node) {
		enqueue_fn = qrtr_local_enqueue;
	} else {
		enqueue_fn = qrtr_node_enqueue;
		node = qrtr_node_lookup(addr->sq_node);
		if (!node) {
			release_sock(sk);
			return -ECONNRESET;
		}
	}

	plen = (len + 3) & ~3;
	skb = sock_alloc_send_skb(sk, plen + QRTR_HDR_MAX_SIZE,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		goto out_node;

	skb_reserve(skb, QRTR_HDR_MAX_SIZE);

	rc = memcpy_from_msg(skb_put(skb, len), msg, len);
	if (rc) {
		kfree_skb(skb);
		goto out_node;
	}

	if (ipc->us.sq_port == QRTR_PORT_CTRL) {
		if (len < 4) {
			rc = -EINVAL;
			kfree_skb(skb);
			goto out_node;
		}

		/* control messages already require the type as 'command' */
		skb_copy_bits(skb, 0, &type, 4);
		type = le32_to_cpu(type);
	}

	rc = enqueue_fn(node, skb, type, &ipc->us, addr);
	if (rc >= 0)
		rc = len;

out_node:
	qrtr_node_release(node);
	release_sock(sk);

	return rc;
}
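
/*
 * Userspace sketch (assumption, not part of the original file): a datagram
 * reaches qrtr_sendmsg() above via the usual socket calls, e.g.
 *
 *	struct sockaddr_qrtr sq = { AF_QIPCRTR, remote_node, remote_port };
 *	int fd = socket(AF_QIPCRTR, SOCK_DGRAM, 0);
 *
 *	sendto(fd, buf, len, 0, (struct sockaddr *)&sq, sizeof(sq));
 *
 * remote_node and remote_port are placeholders for the destination address.
 */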

static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t size, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct qrtr_cb *cb;
	int copied, rc;

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		release_sock(sk);
		return -EADDRNOTAVAIL;
	}

	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
				flags & MSG_DONTWAIT, &rc);
	if (!skb) {
		release_sock(sk);
		return rc;
	}

	copied = skb->len;
	if (copied > size) {
		copied = size;
		msg->msg_flags |= MSG_TRUNC;
	}

	rc = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (rc < 0)
		goto out;
	rc = copied;

	if (addr) {
		cb = (struct qrtr_cb *)skb->cb;
		addr->sq_family = AF_QIPCRTR;
		addr->sq_node = cb->src_node;
		addr->sq_port = cb->src_port;
		msg->msg_namelen = sizeof(*addr);
	}

out:
	skb_free_datagram(sk, skb);
	release_sock(sk);

	return rc;
}

static int qrtr_connect(struct socket *sock, struct sockaddr *saddr,
			int len, int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, saddr);
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	int rc;

	if (len < sizeof(*addr) || addr->sq_family != AF_QIPCRTR)
		return -EINVAL;

	lock_sock(sk);

	sk->sk_state = TCP_CLOSE;
	sock->state = SS_UNCONNECTED;

	rc = qrtr_autobind(sock);
	if (rc) {
		release_sock(sk);
		return rc;
	}

	ipc->peer = *addr;
	sock->state = SS_CONNECTED;
	sk->sk_state = TCP_ESTABLISHED;

	release_sock(sk);

	return 0;
}

static int qrtr_getname(struct socket *sock, struct sockaddr *saddr,
			int peer)
{
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sockaddr_qrtr qaddr;
	struct sock *sk = sock->sk;

	lock_sock(sk);
	if (peer) {
		if (sk->sk_state != TCP_ESTABLISHED) {
			release_sock(sk);
			return -ENOTCONN;
		}

		qaddr = ipc->peer;
	} else {
		qaddr = ipc->us;
	}
	release_sock(sk);

	qaddr.sq_family = AF_QIPCRTR;

	memcpy(saddr, &qaddr, sizeof(qaddr));

	return sizeof(qaddr);
}

static int qrtr_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
	struct sock *sk = sock->sk;
	struct sockaddr_qrtr *sq;
	struct sk_buff *skb;
	struct ifreq ifr;
	long len = 0;
	int rc = 0;

	lock_sock(sk);

	switch (cmd) {
	case TIOCOUTQ:
		len = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (len < 0)
			len = 0;
		rc = put_user(len, (int __user *)argp);
		break;
	case TIOCINQ:
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			len = skb->len;
		rc = put_user(len, (int __user *)argp);
		break;
	case SIOCGIFADDR:
		if (copy_from_user(&ifr, argp, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}

		sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
		*sq = ipc->us;
		if (copy_to_user(argp, &ifr, sizeof(ifr))) {
			rc = -EFAULT;
			break;
		}
		break;
	case SIOCGSTAMP:
		rc = sock_get_timestamp(sk, argp);
		break;
	case SIOCADDRT:
	case SIOCDELRT:
	case SIOCSIFADDR:
	case SIOCGIFDSTADDR:
	case SIOCSIFDSTADDR:
	case SIOCGIFBRDADDR:
	case SIOCSIFBRDADDR:
	case SIOCGIFNETMASK:
	case SIOCSIFNETMASK:
		rc = -EINVAL;
		break;
	default:
		rc = -ENOIOCTLCMD;
		break;
	}

	release_sock(sk);

	return rc;
}

static int qrtr_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct qrtr_sock *ipc;

	if (!sk)
		return 0;

	lock_sock(sk);

	ipc = qrtr_sk(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);

	sock_set_flag(sk, SOCK_DEAD);
	sock->sk = NULL;

	if (!sock_flag(sk, SOCK_ZAPPED))
		qrtr_port_remove(ipc);

	skb_queue_purge(&sk->sk_receive_queue);

	release_sock(sk);
	sock_put(sk);

	return 0;
}

static const struct proto_ops qrtr_proto_ops = {
	.owner		= THIS_MODULE,
	.family		= AF_QIPCRTR,
	.bind		= qrtr_bind,
	.connect	= qrtr_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.listen		= sock_no_listen,
	.sendmsg	= qrtr_sendmsg,
	.recvmsg	= qrtr_recvmsg,
	.getname	= qrtr_getname,
	.ioctl		= qrtr_ioctl,
	.poll		= datagram_poll,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= sock_no_setsockopt,
	.getsockopt	= sock_no_getsockopt,
	.release	= qrtr_release,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
};

static struct proto qrtr_proto = {
	.name		= "QIPCRTR",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct qrtr_sock),
};

static int qrtr_create(struct net *net, struct socket *sock,
		       int protocol, int kern)
{
	struct qrtr_sock *ipc;
	struct sock *sk;

	if (sock->type != SOCK_DGRAM)
		return -EPROTOTYPE;

	sk = sk_alloc(net, AF_QIPCRTR, GFP_KERNEL, &qrtr_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_set_flag(sk, SOCK_ZAPPED);

	sock_init_data(sock, sk);
	sock->ops = &qrtr_proto_ops;

	ipc = qrtr_sk(sk);
	ipc->us.sq_family = AF_QIPCRTR;
	ipc->us.sq_node = qrtr_local_nid;
	ipc->us.sq_port = 0;

	return 0;
}

static const struct nla_policy qrtr_policy[IFA_MAX + 1] = {
	[IFA_LOCAL] = { .type = NLA_U32 },
};

static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[IFA_MAX + 1];
	struct ifaddrmsg *ifm;
	int rc;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	ASSERT_RTNL();

	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy, extack);
	if (rc < 0)
		return rc;

	ifm = nlmsg_data(nlh);
	if (!tb[IFA_LOCAL])
		return -EINVAL;

	qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]);

	return 0;
}

static const struct net_proto_family qrtr_family = {
	.owner	= THIS_MODULE,
	.family	= AF_QIPCRTR,
	.create	= qrtr_create,
};

static int __init qrtr_proto_init(void)
{
	int rc;

	rc = proto_register(&qrtr_proto, 1);
	if (rc)
		return rc;

	rc = sock_register(&qrtr_family);
	if (rc) {
		proto_unregister(&qrtr_proto);
		return rc;
	}

	rc = rtnl_register_module(THIS_MODULE, PF_QIPCRTR, RTM_NEWADDR,
				  qrtr_addr_doit, NULL, 0);
	if (rc) {
		sock_unregister(qrtr_family.family);
		proto_unregister(&qrtr_proto);
	}

	return rc;
}
postcore_initcall(qrtr_proto_init);

static void __exit qrtr_proto_fini(void)
{
	rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR);
	sock_unregister(qrtr_family.family);
	proto_unregister(&qrtr_proto);
}
module_exit(qrtr_proto_fini);

MODULE_DESCRIPTION("Qualcomm IPC-router driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_NETPROTO(PF_QIPCRTR);