output.c

/* RxRPC packet transmission
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/net.h>
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

struct rxrpc_ack_buffer {
        struct rxrpc_wire_header whdr;
        struct rxrpc_ackpacket ack;
        u8 acks[255];
        u8 pad[3];
        struct rxrpc_ackinfo ackinfo;
};

struct rxrpc_abort_buffer {
        struct rxrpc_wire_header whdr;
        __be32 abort_code;
};
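
/* Note on the buffers above: rxrpc_ack_buffer keeps the pieces of an ACK
 * packet contiguous - wire header, ACK header, up to 255 soft-ACK/NACK bytes,
 * padding, then the ackinfo trailer.  The send path below transmits it as two
 * kvecs so that only the soft-ACK bytes actually filled in, plus the trailer,
 * go out on the wire.  rxrpc_abort_buffer is simply the wire header followed
 * by the 32-bit abort code.
 */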

/*
 * Fill out an ACK packet.
 */
static size_t rxrpc_fill_out_ack(struct rxrpc_call *call,
                                 struct rxrpc_ack_buffer *pkt,
                                 rxrpc_seq_t *_hard_ack,
                                 rxrpc_seq_t *_top,
                                 u8 reason)
{
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top, seq;
        int ix;
        u32 mtu, jmax;
        u8 *ackp = pkt->acks;

        /* Barrier against rxrpc_input_data(). */
        serial = call->ackr_serial;
        hard_ack = READ_ONCE(call->rx_hard_ack);
        top = smp_load_acquire(&call->rx_top);
        *_hard_ack = hard_ack;
        *_top = top;

        pkt->ack.bufferSpace = htons(8);
        pkt->ack.maxSkew = htons(call->ackr_skew);
        pkt->ack.firstPacket = htonl(hard_ack + 1);
        pkt->ack.previousPacket = htonl(call->ackr_prev_seq);
        pkt->ack.serial = htonl(serial);
        pkt->ack.reason = reason;
        pkt->ack.nAcks = top - hard_ack;

        if (reason == RXRPC_ACK_PING)
                pkt->whdr.flags |= RXRPC_REQUEST_ACK;

        if (after(top, hard_ack)) {
                seq = hard_ack + 1;
                do {
                        ix = seq & RXRPC_RXTX_BUFF_MASK;
                        if (call->rxtx_buffer[ix])
                                *ackp++ = RXRPC_ACK_TYPE_ACK;
                        else
                                *ackp++ = RXRPC_ACK_TYPE_NACK;
                        seq++;
                } while (before_eq(seq, top));
        }

        mtu = call->conn->params.peer->if_mtu;
        mtu -= call->conn->params.peer->hdrsize;
        jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
        pkt->ackinfo.rxMTU = htonl(rxrpc_rx_mtu);
        pkt->ackinfo.maxMTU = htonl(mtu);
        pkt->ackinfo.rwind = htonl(call->rx_winsize);
        pkt->ackinfo.jumbo_max = htonl(jmax);

        *ackp++ = 0;
        *ackp++ = 0;
        *ackp++ = 0;
        return top - hard_ack + 3;
}
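
/* Note: rxrpc_fill_out_ack() above emits one soft-ACK byte per sequence
 * number in the window (hard_ack + 1 .. top): ACK if the corresponding
 * receive slot holds a buffered packet, NACK if it is still missing.  The
 * value returned is nAcks plus the three bytes of zero padding, which the
 * caller adds to the fixed header sizes when building its kvecs.
 */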

/*
 * Send an ACK call packet.
 */
int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
{
        struct rxrpc_connection *conn = NULL;
        struct rxrpc_ack_buffer *pkt;
        struct msghdr msg;
        struct kvec iov[2];
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top;
        size_t len, n;
        int ret;
        u8 reason;

        spin_lock_bh(&call->lock);
        if (call->conn)
                conn = rxrpc_get_connection_maybe(call->conn);
        spin_unlock_bh(&call->lock);
        if (!conn)
                return -ECONNRESET;

        pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
        if (!pkt) {
                rxrpc_put_connection(conn);
                return -ENOMEM;
        }

        msg.msg_name = &call->peer->srx.transport;
        msg.msg_namelen = call->peer->srx.transport_len;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        pkt->whdr.epoch = htonl(conn->proto.epoch);
        pkt->whdr.cid = htonl(call->cid);
        pkt->whdr.callNumber = htonl(call->call_id);
        pkt->whdr.seq = 0;
        pkt->whdr.type = RXRPC_PACKET_TYPE_ACK;
        pkt->whdr.flags = RXRPC_SLOW_START_OK | conn->out_clientflag;
        pkt->whdr.userStatus = 0;
        pkt->whdr.securityIndex = call->security_ix;
        pkt->whdr._rsvd = 0;
        pkt->whdr.serviceId = htons(call->service_id);

        spin_lock_bh(&call->lock);
        if (ping) {
                reason = RXRPC_ACK_PING;
        } else {
                reason = call->ackr_reason;
                if (!call->ackr_reason) {
                        spin_unlock_bh(&call->lock);
                        ret = 0;
                        goto out;
                }
                call->ackr_reason = 0;
        }
        n = rxrpc_fill_out_ack(call, pkt, &hard_ack, &top, reason);

        spin_unlock_bh(&call->lock);

        iov[0].iov_base = pkt;
        iov[0].iov_len = sizeof(pkt->whdr) + sizeof(pkt->ack) + n;
        iov[1].iov_base = &pkt->ackinfo;
        iov[1].iov_len = sizeof(pkt->ackinfo);
        len = iov[0].iov_len + iov[1].iov_len;

        serial = atomic_inc_return(&conn->serial);
        pkt->whdr.serial = htonl(serial);
        trace_rxrpc_tx_ack(call, serial,
                           ntohl(pkt->ack.firstPacket),
                           ntohl(pkt->ack.serial),
                           pkt->ack.reason, pkt->ack.nAcks);

        if (ping) {
                call->ping_serial = serial;
                smp_wmb();
                /* We need to stick a time in before we send the packet in case
                 * the reply gets back before kernel_sendmsg() completes - but
                 * asking UDP to send the packet can take a relatively long
                 * time, so we update the time after, on the assumption that
                 * the packet transmission is more likely to happen towards the
                 * end of the kernel_sendmsg() call.
                 */
                call->ping_time = ktime_get_real();
                set_bit(RXRPC_CALL_PINGING, &call->flags);
                trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
        }

        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
        if (ping)
                call->ping_time = ktime_get_real();

        if (call->state < RXRPC_CALL_COMPLETE) {
                if (ret < 0) {
                        if (ping)
                                clear_bit(RXRPC_CALL_PINGING, &call->flags);
                        rxrpc_propose_ACK(call, pkt->ack.reason,
                                          ntohs(pkt->ack.maxSkew),
                                          ntohl(pkt->ack.serial),
                                          true, true,
                                          rxrpc_propose_ack_retry_tx);
                } else {
                        spin_lock_bh(&call->lock);
                        if (after(hard_ack, call->ackr_consumed))
                                call->ackr_consumed = hard_ack;
                        if (after(top, call->ackr_seen))
                                call->ackr_seen = top;
                        spin_unlock_bh(&call->lock);
                }
        }

out:
        rxrpc_put_connection(conn);
        kfree(pkt);
        return ret;
}
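
/* Note: for a PING ACK above, the new serial number and a transmission
 * timestamp are recorded (ping_serial, ping_time) and RXRPC_CALL_PINGING is
 * set so that the answering ACK can be matched up elsewhere for an RTT
 * sample; the timestamp is refreshed again after kernel_sendmsg() returns,
 * for the reason given in the in-function comment.  If the send fails on a
 * still-live call, the PINGING flag is cleared and the ACK is re-proposed for
 * retransmission.
 */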

/*
 * Send an ABORT call packet.
 */
int rxrpc_send_abort_packet(struct rxrpc_call *call)
{
        struct rxrpc_connection *conn = NULL;
        struct rxrpc_abort_buffer pkt;
        struct msghdr msg;
        struct kvec iov[1];
        rxrpc_serial_t serial;
        int ret;

        spin_lock_bh(&call->lock);
        if (call->conn)
                conn = rxrpc_get_connection_maybe(call->conn);
        spin_unlock_bh(&call->lock);
        if (!conn)
                return -ECONNRESET;

        msg.msg_name = &call->peer->srx.transport;
        msg.msg_namelen = call->peer->srx.transport_len;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        pkt.whdr.epoch = htonl(conn->proto.epoch);
        pkt.whdr.cid = htonl(call->cid);
        pkt.whdr.callNumber = htonl(call->call_id);
        pkt.whdr.seq = 0;
        pkt.whdr.type = RXRPC_PACKET_TYPE_ABORT;
        pkt.whdr.flags = conn->out_clientflag;
        pkt.whdr.userStatus = 0;
        pkt.whdr.securityIndex = call->security_ix;
        pkt.whdr._rsvd = 0;
        pkt.whdr.serviceId = htons(call->service_id);
        pkt.abort_code = htonl(call->abort_code);

        iov[0].iov_base = &pkt;
        iov[0].iov_len = sizeof(pkt);

        serial = atomic_inc_return(&conn->serial);
        pkt.whdr.serial = htonl(serial);

        ret = kernel_sendmsg(conn->params.local->socket,
                             &msg, iov, 1, sizeof(pkt));

        rxrpc_put_connection(conn);
        return ret;
}
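
/* Note: the ABORT above is small and fixed-size (wire header plus a 32-bit
 * abort code), so it is built on the stack and sent as a single kvec.  As in
 * the ACK path, the connection reference taken under the call lock is dropped
 * before returning.
 */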

/*
 * send a packet through the transport endpoint
 */
int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                           bool retrans)
{
        struct rxrpc_connection *conn = call->conn;
        struct rxrpc_wire_header whdr;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct msghdr msg;
        struct kvec iov[2];
        rxrpc_serial_t serial;
        size_t len;
        bool lost = false;
        int ret, opt;

        _enter(",{%d}", skb->len);

        /* Each transmission of a Tx packet needs a new serial number */
        serial = atomic_inc_return(&conn->serial);

        whdr.epoch = htonl(conn->proto.epoch);
        whdr.cid = htonl(call->cid);
        whdr.callNumber = htonl(call->call_id);
        whdr.seq = htonl(sp->hdr.seq);
        whdr.serial = htonl(serial);
        whdr.type = RXRPC_PACKET_TYPE_DATA;
        whdr.flags = sp->hdr.flags;
        whdr.userStatus = 0;
        whdr.securityIndex = call->security_ix;
        whdr._rsvd = htons(sp->hdr._rsvd);
        whdr.serviceId = htons(call->service_id);

        iov[0].iov_base = &whdr;
        iov[0].iov_len = sizeof(whdr);
        iov[1].iov_base = skb->head;
        iov[1].iov_len = skb->len;
        len = iov[0].iov_len + iov[1].iov_len;

        msg.msg_name = &call->peer->srx.transport;
        msg.msg_namelen = call->peer->srx.transport_len;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        /* If our RTT cache needs working on, request an ACK.  Also request
         * ACKs if a DATA packet appears to have been lost.
         */
        if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
            (retrans ||
             call->cong_mode == RXRPC_CALL_SLOW_START ||
             (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
             ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
                          ktime_get_real())))
                whdr.flags |= RXRPC_REQUEST_ACK;

        if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
                static int lose;
                if ((lose++ & 7) == 7) {
                        ret = 0;
                        lost = true;
                        goto done;
                }
        }

        _proto("Tx DATA %%%u { #%u }", serial, sp->hdr.seq);

        /* send the packet with the don't fragment bit set if we currently
         * think it's small enough */
        if (iov[1].iov_len >= call->peer->maxdata)
                goto send_fragmentable;

        down_read(&conn->params.local->defrag_sem);
        /* send the packet by UDP
         * - returns -EMSGSIZE if UDP would have to fragment the packet
         *   to go out of the interface
         *   - in which case, we'll have processed the ICMP error
         *     message and updated the peer record
         */
        ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
        up_read(&conn->params.local->defrag_sem);

        if (ret == -EMSGSIZE)
                goto send_fragmentable;

done:
        trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
                            retrans, lost);
        if (ret >= 0) {
                ktime_t now = ktime_get_real();
                skb->tstamp = now;
                smp_wmb();
                sp->hdr.serial = serial;
                if (whdr.flags & RXRPC_REQUEST_ACK) {
                        call->peer->rtt_last_req = now;
                        trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
                }
        }
        _leave(" = %d [%u]", ret, call->peer->maxdata);
        return ret;

send_fragmentable:
        /* attempt to send this message with fragmentation enabled */
        _debug("send fragment");

        down_write(&conn->params.local->defrag_sem);

        switch (conn->params.local->srx.transport.family) {
        case AF_INET:
                opt = IP_PMTUDISC_DONT;
                ret = kernel_setsockopt(conn->params.local->socket,
                                        SOL_IP, IP_MTU_DISCOVER,
                                        (char *)&opt, sizeof(opt));
                if (ret == 0) {
                        ret = kernel_sendmsg(conn->params.local->socket, &msg,
                                             iov, 2, len);
                        opt = IP_PMTUDISC_DO;
                        kernel_setsockopt(conn->params.local->socket, SOL_IP,
                                          IP_MTU_DISCOVER,
                                          (char *)&opt, sizeof(opt));
                }
                break;

#ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                opt = IPV6_PMTUDISC_DONT;
                ret = kernel_setsockopt(conn->params.local->socket,
                                        SOL_IPV6, IPV6_MTU_DISCOVER,
                                        (char *)&opt, sizeof(opt));
                if (ret == 0) {
                        ret = kernel_sendmsg(conn->params.local->socket, &msg,
                                             iov, 2, len);
                        opt = IPV6_PMTUDISC_DO;
                        kernel_setsockopt(conn->params.local->socket,
                                          SOL_IPV6, IPV6_MTU_DISCOVER,
                                          (char *)&opt, sizeof(opt));
                }
                break;
#endif
        }

        up_write(&conn->params.local->defrag_sem);
        goto done;
}
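
/* Note: the DATA path above tries the quick route first - if the payload
 * fits the peer's current maxdata estimate it is sent as-is under a read
 * hold of defrag_sem, with the don't-fragment behaviour in effect, and
 * -EMSGSIZE diverts it to send_fragmentable.  That slow path takes
 * defrag_sem for write, temporarily switches the socket to
 * IP_PMTUDISC_DONT / IPV6_PMTUDISC_DONT so the datagram may be fragmented,
 * and restores the discovery setting after the send.
 */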

/*
 * reject packets through the local endpoint
 */
void rxrpc_reject_packets(struct rxrpc_local *local)
{
        struct sockaddr_rxrpc srx;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_wire_header whdr;
        struct sk_buff *skb;
        struct msghdr msg;
        struct kvec iov[2];
        size_t size;
        __be32 code;

        _enter("%d", local->debug_id);

        iov[0].iov_base = &whdr;
        iov[0].iov_len = sizeof(whdr);
        iov[1].iov_base = &code;
        iov[1].iov_len = sizeof(code);
        size = sizeof(whdr) + sizeof(code);

        msg.msg_name = &srx.transport;
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        memset(&whdr, 0, sizeof(whdr));
        whdr.type = RXRPC_PACKET_TYPE_ABORT;

        while ((skb = skb_dequeue(&local->reject_queue))) {
                rxrpc_see_skb(skb, rxrpc_skb_rx_seen);
                sp = rxrpc_skb(skb);

                if (rxrpc_extract_addr_from_skb(&srx, skb) == 0) {
                        msg.msg_namelen = srx.transport_len;

                        code = htonl(skb->priority);

                        whdr.epoch = htonl(sp->hdr.epoch);
                        whdr.cid = htonl(sp->hdr.cid);
                        whdr.callNumber = htonl(sp->hdr.callNumber);
                        whdr.serviceId = htons(sp->hdr.serviceId);
                        whdr.flags = sp->hdr.flags;
                        whdr.flags ^= RXRPC_CLIENT_INITIATED;
                        whdr.flags &= RXRPC_CLIENT_INITIATED;

                        kernel_sendmsg(local->socket, &msg, iov, 2, size);
                }

                rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
        }

        _leave("");
}
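
/* Note: when rejecting a packet above, the abort code is taken from
 * skb->priority and the header fields are copied back from the offending
 * packet.  Flipping and then masking RXRPC_CLIENT_INITIATED leaves only that
 * bit, inverted, so the resulting ABORT is marked as travelling in the
 * opposite direction to the packet being rejected.
 */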