ar-output.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739
  1. /* RxRPC packet transmission
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/net.h>
  12. #include <linux/gfp.h>
  13. #include <linux/skbuff.h>
  14. #include <linux/circ_buf.h>
  15. #include <net/sock.h>
  16. #include <net/af_rxrpc.h>
  17. #include "ar-internal.h"
/*
 * Time in seconds to wait before resending an unacknowledged DATA packet;
 * used to stamp each packet's resend_at deadline in rxrpc_queue_packet().
 */
int rxrpc_resend_timeout = 4;

/* Forward declaration: the common Tx-path worker shared by the client,
 * server and kernel-API sendmsg entry points below. */
static int rxrpc_send_data(struct kiocb *iocb,
			   struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len);
/*
 * Extract control messages from the sendmsg() control buffer.
 *
 * Recognised SOL_RXRPC cmsgs:
 *   RXRPC_USER_CALL_ID - the caller's tag for the call (u32 for compat
 *                        tasks, unsigned long otherwise)
 *   RXRPC_ABORT        - request an abort; payload is a non-zero abort code
 *   RXRPC_ACCEPT       - accept an incoming call (server sockets only);
 *                        carries no payload
 *
 * *command defaults to RXRPC_CMD_SEND_DATA when no command cmsg is present.
 * Returns 0 on success or a negative error code.
 */
static int rxrpc_sendmsg_cmsg(struct rxrpc_sock *rx, struct msghdr *msg,
			      unsigned long *user_call_ID,
			      enum rxrpc_command *command,
			      u32 *abort_code,
			      bool server)
{
	struct cmsghdr *cmsg;
	int len;

	*command = RXRPC_CMD_SEND_DATA;

	if (msg->msg_controllen == 0)
		return -EINVAL;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		/* payload length, excluding the aligned cmsg header */
		len = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
		_debug("CMSG %d, %d, %d",
		       cmsg->cmsg_level, cmsg->cmsg_type, len);

		if (cmsg->cmsg_level != SOL_RXRPC)
			continue;

		switch (cmsg->cmsg_type) {
		case RXRPC_USER_CALL_ID:
			/* compat tasks pass a 32-bit ID; native tasks pass an
			 * unsigned long */
			if (msg->msg_flags & MSG_CMSG_COMPAT) {
				if (len != sizeof(u32))
					return -EINVAL;
				*user_call_ID = *(u32 *) CMSG_DATA(cmsg);
			} else {
				if (len != sizeof(unsigned long))
					return -EINVAL;
				*user_call_ID = *(unsigned long *)
					CMSG_DATA(cmsg);
			}
			_debug("User Call ID %lx", *user_call_ID);
			break;

		case RXRPC_ABORT:
			/* at most one command cmsg may be supplied */
			if (*command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			*command = RXRPC_CMD_SEND_ABORT;
			if (len != sizeof(*abort_code))
				return -EINVAL;
			*abort_code = *(unsigned int *) CMSG_DATA(cmsg);
			_debug("Abort %x", *abort_code);
			/* an abort code of 0 is not permitted */
			if (*abort_code == 0)
				return -EINVAL;
			break;

		case RXRPC_ACCEPT:
			/* at most one command cmsg may be supplied */
			if (*command != RXRPC_CMD_SEND_DATA)
				return -EINVAL;
			*command = RXRPC_CMD_ACCEPT;
			if (len != 0)
				return -EINVAL;
			/* only server sockets may accept incoming calls */
			if (!server)
				return -EISCONN;
			break;

		default:
			return -EINVAL;
		}
	}

	_leave(" = 0");
	return 0;
}
/*
 * abort a call, sending an ABORT packet to the peer
 * - marks the call locally aborted and queues the call for processing; the
 *   RXRPC_CALL_ABORT event bit presumably makes the call processor emit the
 *   actual ABORT packet (TODO: confirm against rxrpc_process_call())
 */
static void rxrpc_send_abort(struct rxrpc_call *call, u32 abort_code)
{
	write_lock_bh(&call->state_lock);

	if (call->state <= RXRPC_CALL_COMPLETE) {
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = abort_code;
		set_bit(RXRPC_CALL_ABORT, &call->events);
		/* cancel pending resend/ACK work - the abort supersedes it */
		del_timer_sync(&call->resend_timer);
		del_timer_sync(&call->ack_timer);
		clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
		clear_bit(RXRPC_CALL_ACK, &call->events);
		clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
		rxrpc_queue_call(call);
	}

	write_unlock_bh(&call->state_lock);
}
/*
 * send a message forming part of a client call through an RxRPC socket
 * - caller holds the socket locked
 * - the socket may be either a client socket or a server socket
 */
int rxrpc_client_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
			 struct rxrpc_transport *trans, struct msghdr *msg,
			 size_t len)
{
	struct rxrpc_conn_bundle *bundle;
	enum rxrpc_command cmd;
	struct rxrpc_call *call;
	unsigned long user_call_ID = 0;
	struct key *key;
	__be16 service_id;
	u32 abort_code = 0;
	int ret;

	_enter("");

	ASSERT(trans != NULL);

	ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
				 false);
	if (ret < 0)
		return ret;

	bundle = NULL;
	if (trans) {
		service_id = rx->service_id;
		/* an explicit target address overrides the socket's default
		 * service ID */
		if (msg->msg_name) {
			struct sockaddr_rxrpc *srx =
				(struct sockaddr_rxrpc *) msg->msg_name;
			service_id = htons(srx->srx_service);
		}
		key = rx->key;
		/* a key with no payload is treated as no security */
		if (key && !rx->key->payload.data)
			key = NULL;
		bundle = rxrpc_get_bundle(rx, trans, key, service_id,
					  GFP_KERNEL);
		if (IS_ERR(bundle))
			return PTR_ERR(bundle);
	}

	/* find or create the call for this user ID; our bundle ref is dropped
	 * again below - rxrpc_get_client_call() presumably takes its own
	 * (TODO: verify) */
	call = rxrpc_get_client_call(rx, trans, bundle, user_call_ID,
				     abort_code == 0, GFP_KERNEL);
	if (trans)
		rxrpc_put_bundle(trans, bundle);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return PTR_ERR(call);
	}

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	if (call->state >= RXRPC_CALL_COMPLETE) {
		/* it's too late for this call */
		ret = -ESHUTDOWN;
	} else if (cmd == RXRPC_CMD_SEND_ABORT) {
		/* ret is still 0 from the successful cmsg parse above */
		rxrpc_send_abort(call, abort_code);
	} else if (cmd != RXRPC_CMD_SEND_DATA) {
		/* RXRPC_CMD_ACCEPT was already rejected for non-server
		 * sockets by rxrpc_sendmsg_cmsg() */
		ret = -EINVAL;
	} else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
		/* request phase complete for this client call */
		ret = -EPROTO;
	} else {
		ret = rxrpc_send_data(iocb, rx, call, msg, len);
	}

	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ret;
}
/**
 * rxrpc_kernel_send_data - Allow a kernel service to send data on a call
 * @call: The call to send data through
 * @msg: The data to send
 * @len: The amount of data to send
 *
 * Allow a kernel service to send data on a call.  The call must be in a state
 * appropriate to sending data.  No control data should be supplied in @msg,
 * nor should an address be supplied.  MSG_MORE should be flagged if there's
 * more data to come, otherwise this data will end the transmission phase.
 *
 * Returns the number of bytes queued or a negative error code.
 */
int rxrpc_kernel_send_data(struct rxrpc_call *call, struct msghdr *msg,
			   size_t len)
{
	int ret;

	_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);

	ASSERTCMP(msg->msg_name, ==, NULL);
	ASSERTCMP(msg->msg_control, ==, NULL);

	lock_sock(&call->socket->sk);

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	if (call->state >= RXRPC_CALL_COMPLETE) {
		ret = -ESHUTDOWN; /* it's too late for this call */
	} else if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
		   call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
		   call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
		ret = -EPROTO; /* request phase complete for this client call */
	} else {
		/* the kernel caller's iovecs point at kernel memory, so widen
		 * the address limit for the duration of the copy */
		mm_segment_t oldfs = get_fs();
		set_fs(KERNEL_DS);
		ret = rxrpc_send_data(NULL, call->socket, call, msg, len);
		set_fs(oldfs);
	}

	release_sock(&call->socket->sk);
	_leave(" = %d", ret);
	return ret;
}

EXPORT_SYMBOL(rxrpc_kernel_send_data);
/**
 * rxrpc_kernel_abort_call - Allow a kernel service to abort a call
 * @call: The call to be aborted
 * @abort_code: The abort code to stick into the ABORT packet
 *
 * Allow a kernel service to abort a call, if it's still in an abortable state.
 */
void rxrpc_kernel_abort_call(struct rxrpc_call *call, u32 abort_code)
{
	_enter("{%d},%d", call->debug_id, abort_code);

	lock_sock(&call->socket->sk);

	_debug("CALL %d USR %lx ST %d on CONN %p",
	       call->debug_id, call->user_call_ID, call->state, call->conn);

	/* only abort calls that haven't already completed or aborted */
	if (call->state < RXRPC_CALL_COMPLETE)
		rxrpc_send_abort(call, abort_code);

	release_sock(&call->socket->sk);
	_leave("");
}

EXPORT_SYMBOL(rxrpc_kernel_abort_call);
  228. /*
  229. * send a message through a server socket
  230. * - caller holds the socket locked
  231. */
  232. int rxrpc_server_sendmsg(struct kiocb *iocb, struct rxrpc_sock *rx,
  233. struct msghdr *msg, size_t len)
  234. {
  235. enum rxrpc_command cmd;
  236. struct rxrpc_call *call;
  237. unsigned long user_call_ID = 0;
  238. u32 abort_code = 0;
  239. int ret;
  240. _enter("");
  241. ret = rxrpc_sendmsg_cmsg(rx, msg, &user_call_ID, &cmd, &abort_code,
  242. true);
  243. if (ret < 0)
  244. return ret;
  245. if (cmd == RXRPC_CMD_ACCEPT) {
  246. call = rxrpc_accept_call(rx, user_call_ID);
  247. if (IS_ERR(call))
  248. return PTR_ERR(call);
  249. rxrpc_put_call(call);
  250. return 0;
  251. }
  252. call = rxrpc_find_server_call(rx, user_call_ID);
  253. if (!call)
  254. return -EBADSLT;
  255. if (call->state >= RXRPC_CALL_COMPLETE) {
  256. ret = -ESHUTDOWN;
  257. goto out;
  258. }
  259. switch (cmd) {
  260. case RXRPC_CMD_SEND_DATA:
  261. if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
  262. call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
  263. call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
  264. /* Tx phase not yet begun for this call */
  265. ret = -EPROTO;
  266. break;
  267. }
  268. ret = rxrpc_send_data(iocb, rx, call, msg, len);
  269. break;
  270. case RXRPC_CMD_SEND_ABORT:
  271. rxrpc_send_abort(call, abort_code);
  272. break;
  273. default:
  274. BUG();
  275. }
  276. out:
  277. rxrpc_put_call(call);
  278. _leave(" = %d", ret);
  279. return ret;
  280. }
/*
 * send a packet through the transport endpoint
 * - small packets are first tried with path-MTU discovery active (DF set);
 *   oversized packets, or packets bounced with -EMSGSIZE, go via the
 *   fragmentable path below
 */
int rxrpc_send_packet(struct rxrpc_transport *trans, struct sk_buff *skb)
{
	struct kvec iov[1];
	struct msghdr msg;
	int ret, opt;

	_enter(",{%d}", skb->len);

	iov[0].iov_base = skb->head;
	iov[0].iov_len = skb->len;

	msg.msg_name = &trans->peer->srx.transport.sin;
	msg.msg_namelen = sizeof(trans->peer->srx.transport.sin);
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	/* send the packet with the don't fragment bit set if we currently
	 * think it's small enough */
	if (skb->len - sizeof(struct rxrpc_header) < trans->peer->maxdata) {
		/* read-hold keeps the fragmentable path (which takes this for
		 * write) from flipping the PMTU socket option under us */
		down_read(&trans->local->defrag_sem);
		/* send the packet by UDP
		 * - returns -EMSGSIZE if UDP would have to fragment the packet
		 *   to go out of the interface
		 *   - in which case, we'll have processed the ICMP error
		 *     message and update the peer record
		 */
		ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
				     iov[0].iov_len);
		up_read(&trans->local->defrag_sem);
		if (ret == -EMSGSIZE)
			goto send_fragmentable;

		_leave(" = %d [%u]", ret, trans->peer->maxdata);
		return ret;
	}
	/* NOTE: an oversized packet deliberately falls through into the
	 * fragmentable path here */

send_fragmentable:
	/* attempt to send this message with fragmentation enabled */
	_debug("send fragment");

	/* write-hold excludes all DF-path senders while PMTU discovery is
	 * temporarily turned off on the shared transport socket */
	down_write(&trans->local->defrag_sem);
	opt = IP_PMTUDISC_DONT;
	ret = kernel_setsockopt(trans->local->socket, SOL_IP, IP_MTU_DISCOVER,
				(char *) &opt, sizeof(opt));
	if (ret == 0) {
		ret = kernel_sendmsg(trans->local->socket, &msg, iov, 1,
				     iov[0].iov_len);

		/* restore PMTU discovery for subsequent DF sends */
		opt = IP_PMTUDISC_DO;
		kernel_setsockopt(trans->local->socket, SOL_IP,
				  IP_MTU_DISCOVER, (char *) &opt, sizeof(opt));
	}

	up_write(&trans->local->defrag_sem);
	_leave(" = %d [frag %u]", ret, trans->peer->maxdata);
	return ret;
}
/*
 * wait for space to appear in the transmit/ACK window
 * - caller holds the socket locked
 * - returns 0 once there is space, or a signal-derived error if interrupted
 */
static int rxrpc_wait_for_tx_window(struct rxrpc_sock *rx,
				    struct rxrpc_call *call,
				    long *timeo)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret;

	_enter(",{%d},%ld",
	       CIRC_SPACE(call->acks_head, call->acks_tail, call->acks_winsz),
	       *timeo);

	add_wait_queue(&call->tx_waitq, &myself);

	for (;;) {
		/* set the task state before testing the condition so a wakeup
		 * between the test and schedule_timeout() isn't lost */
		set_current_state(TASK_INTERRUPTIBLE);
		ret = 0;
		if (CIRC_SPACE(call->acks_head, call->acks_tail,
			       call->acks_winsz) > 0)
			break;
		if (signal_pending(current)) {
			ret = sock_intr_errno(*timeo);
			break;
		}

		/* drop the socket lock while sleeping so other contexts can
		 * make progress and open the window */
		release_sock(&rx->sk);
		*timeo = schedule_timeout(*timeo);
		lock_sock(&rx->sk);
	}

	remove_wait_queue(&call->tx_waitq, &myself);
	set_current_state(TASK_RUNNING);
	_leave(" = %d", ret);
	return ret;
}
  366. /*
  367. * attempt to schedule an instant Tx resend
  368. */
  369. static inline void rxrpc_instant_resend(struct rxrpc_call *call)
  370. {
  371. read_lock_bh(&call->state_lock);
  372. if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
  373. clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
  374. if (call->state < RXRPC_CALL_COMPLETE &&
  375. !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
  376. rxrpc_queue_call(call);
  377. }
  378. read_unlock_bh(&call->state_lock);
  379. }
/*
 * queue a packet for transmission, set the resend timer and attempt
 * to send the packet immediately
 */
static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
			       bool last)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	int ret;

	_net("queue skb %p [%d]", skb, call->acks_head);

	ASSERT(call->acks_window != NULL);
	/* publish the skb into the ACK window slot before advancing the head
	 * index, so concurrent readers never see a stale slot */
	call->acks_window[call->acks_head] = (unsigned long) skb;
	smp_wmb();
	call->acks_head = (call->acks_head + 1) & (call->acks_winsz - 1);

	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
		_debug("________awaiting reply/ACK__________");
		write_lock_bh(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_CLIENT_SEND_REQUEST:
			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
			break;
		case RXRPC_CALL_SERVER_ACK_REQUEST:
			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
			if (!last)
				break;
			/* fall through - the last packet of the reply moves
			 * the call straight on to awaiting the final ACK */
		case RXRPC_CALL_SERVER_SEND_REPLY:
			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
			break;
		default:
			break;
		}
		write_unlock_bh(&call->state_lock);
	}

	_proto("Tx DATA %%%u { #%u }",
	       ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));

	/* stamp the packet's resend deadline and make sure the resend timer
	 * is running */
	sp->need_resend = 0;
	sp->resend_at = jiffies + rxrpc_resend_timeout * HZ;
	if (!test_and_set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags)) {
		_debug("run timer");
		call->resend_timer.expires = sp->resend_at;
		add_timer(&call->resend_timer);
	}

	/* attempt to cancel the rx-ACK timer, deferring reply transmission if
	 * we're ACK'ing the request phase of an incoming call */
	ret = -EAGAIN;
	if (try_to_del_timer_sync(&call->ack_timer) >= 0) {
		/* the packet may be freed by rxrpc_process_call() before this
		 * returns */
		ret = rxrpc_send_packet(call->conn->trans, skb);
		_net("sent skb %p", skb);
	} else {
		_debug("failed to delete ACK timer");
	}

	/* if the packet wasn't sent now, flag it for an immediate resend by
	 * the call processor */
	if (ret < 0) {
		_debug("need instant resend %d", ret);
		sp->need_resend = 1;
		rxrpc_instant_resend(call);
	}

	_leave("");
}
/*
 * send data through a socket
 * - must be called in process context
 * - caller holds the socket locked
 * - walks the caller's iovecs, packing the data into DATA packets sized to
 *   the peer's MTU, and queues each completed packet for transmission;
 *   returns the number of bytes consumed or a negative error code
 */
static int rxrpc_send_data(struct kiocb *iocb,
			   struct rxrpc_sock *rx,
			   struct rxrpc_call *call,
			   struct msghdr *msg, size_t len)
{
	struct rxrpc_skb_priv *sp;
	unsigned char __user *from;
	struct sk_buff *skb;
	struct iovec *iov;
	struct sock *sk = &rx->sk;
	long timeo;
	bool more;
	int ret, ioc, segment, copied;

	_enter(",,,{%zu},%zu", msg->msg_iovlen, len);

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	/* this should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		return -EPIPE;

	/* prime the iovec walk with the first segment */
	iov = msg->msg_iov;
	ioc = msg->msg_iovlen - 1;
	from = iov->iov_base;
	segment = iov->iov_len;
	iov++;
	more = msg->msg_flags & MSG_MORE;

	/* resume any packet left part-filled by a previous MSG_MORE sendmsg */
	skb = call->tx_pending;
	call->tx_pending = NULL;

	copied = 0;
	do {
		int copy;

		if (segment > len)
			segment = len;

		_debug("SEGMENT %d @%p", segment, from);

		if (!skb) {
			size_t size, chunk, max, space;

			_debug("alloc");

			/* wait for the Tx window to open if it's full */
			if (CIRC_SPACE(call->acks_head, call->acks_tail,
				       call->acks_winsz) <= 0) {
				ret = -EAGAIN;
				if (msg->msg_flags & MSG_DONTWAIT)
					goto maybe_error;
				ret = rxrpc_wait_for_tx_window(rx, call,
							       &timeo);
				if (ret < 0)
					goto maybe_error;
			}

			/* work out the largest payload chunk, allowing for
			 * the security wrapper and alignment */
			max = call->conn->trans->peer->maxdata;
			max -= call->conn->security_size;
			max &= ~(call->conn->size_align - 1UL);

			chunk = max;
			if (chunk > len && !more)
				chunk = len;

			space = chunk + call->conn->size_align;
			space &= ~(call->conn->size_align - 1UL);

			size = space + call->conn->header_size;

			_debug("SIZE: %zu/%zu/%zu", chunk, space, size);

			/* create a buffer that we can retain until it's ACK'd */
			skb = sock_alloc_send_skb(
				sk, size, msg->msg_flags & MSG_DONTWAIT, &ret);
			if (!skb)
				goto maybe_error;

			rxrpc_new_skb(skb);

			_debug("ALLOC SEND %p", skb);

			/* skb->mark counts the payload bytes copied in so far */
			ASSERTCMP(skb->mark, ==, 0);

			_debug("HS: %u", call->conn->header_size);
			skb_reserve(skb, call->conn->header_size);
			skb->len += call->conn->header_size;

			sp = rxrpc_skb(skb);
			sp->remain = chunk;
			if (sp->remain > skb_tailroom(skb))
				sp->remain = skb_tailroom(skb);

			_net("skb: hr %d, tr %d, hl %d, rm %d",
			     skb_headroom(skb),
			     skb_tailroom(skb),
			     skb_headlen(skb),
			     sp->remain);

			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		_debug("append");
		sp = rxrpc_skb(skb);

		/* append next segment of data to the current buffer */
		copy = skb_tailroom(skb);
		ASSERTCMP(copy, >, 0);
		if (copy > segment)
			copy = segment;
		if (copy > sp->remain)
			copy = sp->remain;

		_debug("add");
		ret = skb_add_data(skb, from, copy);
		_debug("added");
		if (ret < 0)
			goto efault;
		sp->remain -= copy;
		skb->mark += copy;
		copied += copy;

		len -= copy;
		segment -= copy;
		from += copy;

		/* advance to the next non-empty iovec */
		while (segment == 0 && ioc > 0) {
			from = iov->iov_base;
			segment = iov->iov_len;
			iov++;
			ioc--;
		}
		if (len == 0) {
			segment = 0;
			ioc = 0;
		}

		/* check for the far side aborting the call or a network error
		 * occurring */
		if (call->state > RXRPC_CALL_COMPLETE)
			goto call_aborted;

		/* add the packet to the send queue if it's now full */
		if (sp->remain <= 0 || (segment == 0 && !more)) {
			struct rxrpc_connection *conn = call->conn;
			size_t pad;

			/* pad out if we're using security */
			if (conn->security) {
				pad = conn->security_size + skb->mark;
				pad = conn->size_align - pad;
				pad &= conn->size_align - 1;
				_debug("pad %zu", pad);
				if (pad)
					memset(skb_put(skb, pad), 0, pad);
			}

			/* fill out the rxrpc wire header */
			sp->hdr.epoch = conn->epoch;
			sp->hdr.cid = call->cid;
			sp->hdr.callNumber = call->call_id;
			sp->hdr.seq =
				htonl(atomic_inc_return(&call->sequence));
			sp->hdr.serial =
				htonl(atomic_inc_return(&conn->serial));
			sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
			sp->hdr.userStatus = 0;
			sp->hdr.securityIndex = conn->security_ix;
			sp->hdr._rsvd = 0;
			sp->hdr.serviceId = conn->service_id;

			sp->hdr.flags = conn->out_clientflag;
			if (len == 0 && !more)
				sp->hdr.flags |= RXRPC_LAST_PACKET;
			else if (CIRC_SPACE(call->acks_head, call->acks_tail,
					    call->acks_winsz) > 1)
				sp->hdr.flags |= RXRPC_MORE_PACKETS;

			ret = rxrpc_secure_packet(
				call, skb, skb->mark,
				skb->head + sizeof(struct rxrpc_header));
			if (ret < 0)
				goto out;

			memcpy(skb->head, &sp->hdr,
			       sizeof(struct rxrpc_header));
			rxrpc_queue_packet(call, skb, segment == 0 && !more);
			skb = NULL;
		}

	} while (segment > 0);

success:
	ret = copied;
out:
	/* stash any part-filled packet so the next sendmsg can continue it */
	call->tx_pending = skb;
	_leave(" = %d", ret);
	return ret;

call_aborted:
	rxrpc_free_skb(skb);
	if (call->state == RXRPC_CALL_NETWORK_ERROR)
		ret = call->conn->trans->peer->net_error;
	else
		ret = -ECONNABORTED;
	_leave(" = %d", ret);
	return ret;

maybe_error:
	/* report partial progress in preference to an error */
	if (copied)
		goto success;
	goto out;

efault:
	ret = -EFAULT;
	goto out;
}