/* ar-ack.c: Management of Tx window, Tx resend, ACKs and out-of-sequence
 * reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static unsigned rxrpc_ack_defer = 1;

static const char *const rxrpc_acks[] = {
        "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
        "-?-"
};

static const s8 rxrpc_ack_priority[] = {
        [0] = 0,
        [RXRPC_ACK_DELAY] = 1,
        [RXRPC_ACK_REQUESTED] = 2,
        [RXRPC_ACK_IDLE] = 3,
        [RXRPC_ACK_PING_RESPONSE] = 4,
        [RXRPC_ACK_DUPLICATE] = 5,
        [RXRPC_ACK_OUT_OF_SEQUENCE] = 6,
        [RXRPC_ACK_EXCEEDS_WINDOW] = 7,
        [RXRPC_ACK_NOSPACE] = 8,
};

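/*
 * A note on the priority table above: a larger value means a more urgent ACK
 * reason.  __rxrpc_propose_ACK() only lets a new proposal displace the ACK
 * currently pending on a call if its priority is at least as high - for
 * example, a pending DUPLICATE ACK (priority 5) cannot be downgraded to a
 * DELAY ACK (priority 1), but an EXCEEDS_WINDOW ACK (priority 7) may replace
 * it.
 */
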
/*
 * propose an ACK be sent
 */
void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                         __be32 serial, bool immediate)
{
        unsigned long expiry;
        s8 prior = rxrpc_ack_priority[ack_reason];

        ASSERTCMP(prior, >, 0);

        _enter("{%d},%s,%%%x,%u",
               call->debug_id, rxrpc_acks[ack_reason], ntohl(serial),
               immediate);

        if (prior < rxrpc_ack_priority[call->ackr_reason]) {
                if (immediate)
                        goto cancel_timer;
                return;
        }

        /* update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
         * numbers */
        if (prior == rxrpc_ack_priority[call->ackr_reason]) {
                if (prior <= 4)
                        call->ackr_serial = serial;
                if (immediate)
                        goto cancel_timer;
                return;
        }

        call->ackr_reason = ack_reason;
        call->ackr_serial = serial;

        switch (ack_reason) {
        case RXRPC_ACK_DELAY:
                _debug("run delay timer");
                call->ack_timer.expires = jiffies + rxrpc_ack_timeout * HZ;
                add_timer(&call->ack_timer);
                return;

        case RXRPC_ACK_IDLE:
                if (!immediate) {
                        _debug("run defer timer");
                        expiry = 1;
                        goto run_timer;
                }
                goto cancel_timer;

        case RXRPC_ACK_REQUESTED:
                if (!rxrpc_ack_defer)
                        goto cancel_timer;
                if (!immediate || serial == cpu_to_be32(1)) {
                        _debug("run defer timer");
                        expiry = rxrpc_ack_defer;
                        goto run_timer;
                }

        default:
                _debug("immediate ACK");
                goto cancel_timer;
        }

run_timer:
        expiry += jiffies;
        if (!timer_pending(&call->ack_timer) ||
            time_after(call->ack_timer.expires, expiry))
                mod_timer(&call->ack_timer, expiry);
        return;

cancel_timer:
        _debug("cancel timer %%%u", ntohl(serial));
        try_to_del_timer_sync(&call->ack_timer);
        read_lock_bh(&call->state_lock);
        if (call->state <= RXRPC_CALL_COMPLETE &&
            !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
                rxrpc_queue_call(call);
        read_unlock_bh(&call->state_lock);
}

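/*
 * Note that the wrapper below checks the priority once without holding the
 * call lock; __rxrpc_propose_ACK() is then entered under the lock and
 * repeats the comparison, so a racing change to ackr_reason is caught there.
 */
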
/*
 * propose an ACK be sent, locking the call structure
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
                       __be32 serial, bool immediate)
{
        s8 prior = rxrpc_ack_priority[ack_reason];

        if (prior > rxrpc_ack_priority[call->ackr_reason]) {
                spin_lock_bh(&call->lock);
                __rxrpc_propose_ACK(call, ack_reason, serial, immediate);
                spin_unlock_bh(&call->lock);
        }
}

/*
 * set the resend timer
 */
static void rxrpc_set_resend(struct rxrpc_call *call, u8 resend,
                             unsigned long resend_at)
{
        read_lock_bh(&call->state_lock);
        if (call->state >= RXRPC_CALL_COMPLETE)
                resend = 0;

        if (resend & 1) {
                _debug("SET RESEND");
                set_bit(RXRPC_CALL_RESEND, &call->events);
        }

        if (resend & 2) {
                _debug("MODIFY RESEND TIMER");
                set_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
                mod_timer(&call->resend_timer, resend_at);
        } else {
                _debug("KILL RESEND TIMER");
                del_timer_sync(&call->resend_timer);
                clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
                clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
        }
        read_unlock_bh(&call->state_lock);
}

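/*
 * Two conventions are used throughout the Tx-window code below:
 *
 * - "resend" is a two-bit mask: bit 0 means at least one packet is due for
 *   retransmission now (raise the RXRPC_CALL_RESEND event); bit 1 means a
 *   packet will become due later (arm the resend timer for the earliest
 *   resend_at time seen).
 *
 * - each slot in call->acks_window[] holds an sk_buff pointer with bit 0
 *   borrowed as a flag: the bit is set once that packet has been soft-ACK'd,
 *   so (*p_txb & ~1) recovers the pointer and (*p_txb & 1) tests the flag.
 *   This works because sk_buff pointers are at least 2-byte aligned.
 */
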
/*
 * resend packets
 */
static void rxrpc_resend(struct rxrpc_call *call)
{
        struct rxrpc_skb_priv *sp;
        struct rxrpc_header *hdr;
        struct sk_buff *txb;
        unsigned long *p_txb, resend_at;
        int loop, stop;
        u8 resend;

        _enter("{%d,%d,%d,%d},",
               call->acks_hard, call->acks_unacked,
               atomic_read(&call->sequence),
               CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

        stop = 0;
        resend = 0;
        resend_at = 0;

        for (loop = call->acks_tail;
             loop != call->acks_head && !stop;
             loop = (loop + 1) & (call->acks_winsz - 1)
             ) {
                p_txb = call->acks_window + loop;
                smp_read_barrier_depends();
                if (*p_txb & 1)
                        continue;

                txb = (struct sk_buff *) *p_txb;
                sp = rxrpc_skb(txb);

                if (sp->need_resend) {
                        sp->need_resend = 0;

                        /* each Tx packet has a new serial number */
                        sp->hdr.serial =
                                htonl(atomic_inc_return(&call->conn->serial));

                        hdr = (struct rxrpc_header *) txb->head;
                        hdr->serial = sp->hdr.serial;

                        _proto("Tx DATA %%%u { #%d }",
                               ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));
                        if (rxrpc_send_packet(call->conn->trans, txb) < 0) {
                                /* transmission failed: halt the scan and
                                 * retry this packet shortly */
                                stop = 1;
                                sp->resend_at = jiffies + 3;
                        } else {
                                sp->resend_at =
                                        jiffies + rxrpc_resend_timeout * HZ;
                        }
                }

                if (time_after_eq(jiffies + 1, sp->resend_at)) {
                        sp->need_resend = 1;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
                                resend_at = sp->resend_at;
                } else {
                        resend_at = sp->resend_at;
                        resend |= 2;
                }
        }

        rxrpc_set_resend(call, resend, resend_at);
        _leave("");
}

/*
 * handle resend timer expiry
 */
static void rxrpc_resend_timer(struct rxrpc_call *call)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *txb;
        unsigned long *p_txb, resend_at;
        int loop;
        u8 resend;

        _enter("%d,%d,%d",
               call->acks_tail, call->acks_unacked, call->acks_head);

        if (call->state >= RXRPC_CALL_COMPLETE)
                return;

        resend = 0;
        resend_at = 0;

        for (loop = call->acks_unacked;
             loop != call->acks_head;
             loop = (loop + 1) & (call->acks_winsz - 1)
             ) {
                p_txb = call->acks_window + loop;
                smp_read_barrier_depends();
                txb = (struct sk_buff *) (*p_txb & ~1);
                sp = rxrpc_skb(txb);

                ASSERT(!(*p_txb & 1));

                if (sp->need_resend) {
                        ;
                } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
                        sp->need_resend = 1;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
                                resend_at = sp->resend_at;
                } else {
                        resend_at = sp->resend_at;
                        resend |= 2;
                }
        }

        rxrpc_set_resend(call, resend, resend_at);
        _leave("");
}

/*
 * process soft ACKs of our transmitted packets
 * - these indicate packets the peer has or has not received, but hasn't yet
 *   given to the consumer, and so can still be discarded and re-requested
 */
static int rxrpc_process_soft_ACKs(struct rxrpc_call *call,
                                   struct rxrpc_ackpacket *ack,
                                   struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *txb;
        unsigned long *p_txb, resend_at;
        int loop;
        u8 sacks[RXRPC_MAXACKS], resend;

        _enter("{%d,%d},{%d},",
               call->acks_hard,
               CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz),
               ack->nAcks);

        if (skb_copy_bits(skb, 0, sacks, ack->nAcks) < 0)
                goto protocol_error;

        resend = 0;
        resend_at = 0;
        for (loop = 0; loop < ack->nAcks; loop++) {
                p_txb = call->acks_window;
                p_txb += (call->acks_tail + loop) & (call->acks_winsz - 1);
                smp_read_barrier_depends();
                txb = (struct sk_buff *) (*p_txb & ~1);
                sp = rxrpc_skb(txb);

                switch (sacks[loop]) {
                case RXRPC_ACK_TYPE_ACK:
                        sp->need_resend = 0;
                        *p_txb |= 1;
                        break;
                case RXRPC_ACK_TYPE_NACK:
                        sp->need_resend = 1;
                        *p_txb &= ~1;
                        resend = 1;
                        break;
                default:
                        _debug("Unsupported ACK type %d", sacks[loop]);
                        goto protocol_error;
                }
        }

        smp_mb();
        call->acks_unacked = (call->acks_tail + loop) & (call->acks_winsz - 1);

        /* anything not explicitly ACK'd is implicitly NACK'd, but may just not
         * have been received or processed yet by the far end */
        for (loop = call->acks_unacked;
             loop != call->acks_head;
             loop = (loop + 1) & (call->acks_winsz - 1)
             ) {
                p_txb = call->acks_window + loop;
                smp_read_barrier_depends();
                txb = (struct sk_buff *) (*p_txb & ~1);
                sp = rxrpc_skb(txb);

                if (*p_txb & 1) {
                        /* packet must have been discarded */
                        sp->need_resend = 1;
                        *p_txb &= ~1;
                        resend |= 1;
                } else if (sp->need_resend) {
                        ;
                } else if (time_after_eq(jiffies + 1, sp->resend_at)) {
                        sp->need_resend = 1;
                        resend |= 1;
                } else if (resend & 2) {
                        if (time_before(sp->resend_at, resend_at))
                                resend_at = sp->resend_at;
                } else {
                        resend_at = sp->resend_at;
                        resend |= 2;
                }
        }

        rxrpc_set_resend(call, resend, resend_at);
        _leave(" = 0");
        return 0;

protocol_error:
        _leave(" = -EPROTO");
        return -EPROTO;
}

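/*
 * Soft ACKs vs hard ACKs: a soft ACK only says the peer currently holds the
 * packet; the peer may still drop it and NACK it later, so the sk_buff stays
 * in the Tx window (merely flagged via bit 0 of its window slot).  A hard
 * ACK (ack.firstPacket in the ACK packet) says everything before that
 * sequence number has been consumed, so rxrpc_rotate_tx_window() below can
 * free those buffers for good.
 */
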
/*
 * discard hard-ACK'd packets from the Tx window
 */
static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
{
        unsigned long _skb;
        int tail = call->acks_tail, old_tail;
        int win = CIRC_CNT(call->acks_head, tail, call->acks_winsz);

        _enter("{%u,%u},%u", call->acks_hard, win, hard);

        ASSERTCMP(hard - call->acks_hard, <=, win);

        while (call->acks_hard < hard) {
                smp_read_barrier_depends();
                _skb = call->acks_window[tail] & ~1;
                rxrpc_free_skb((struct sk_buff *) _skb);
                old_tail = tail;
                tail = (tail + 1) & (call->acks_winsz - 1);
                call->acks_tail = tail;
                if (call->acks_unacked == old_tail)
                        call->acks_unacked = tail;
                call->acks_hard++;
        }

        wake_up(&call->tx_waitq);
}

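/*
 * A worked example of the circular-buffer arithmetic used above, given that
 * acks_winsz is a power of two: CIRC_CNT(head, tail, size) from
 * <linux/circ_buf.h> is ((head - tail) & (size - 1)), so with head = 2,
 * tail = 6 and winsz = 8 the window holds (2 - 6) & 7 = 4 packets.  The same
 * masking, (i + 1) & (winsz - 1), steps an index with wrap-around.
 */
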
/*
 * clear the Tx window in the event of a failure
 */
static void rxrpc_clear_tx_window(struct rxrpc_call *call)
{
        rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
}

/*
 * drain the out of sequence received packet queue into the packet Rx queue
 */
static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        bool terminal;
        int ret;

        _enter("{%d,%d}", call->rx_data_post, call->rx_first_oos);

        spin_lock_bh(&call->lock);

        ret = -ECONNRESET;
        if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
                goto socket_unavailable;

        skb = skb_dequeue(&call->rx_oos_queue);
        if (skb) {
                sp = rxrpc_skb(skb);

                _debug("drain OOS packet %d [%d]",
                       ntohl(sp->hdr.seq), call->rx_first_oos);

                if (ntohl(sp->hdr.seq) != call->rx_first_oos) {
                        skb_queue_head(&call->rx_oos_queue, skb);
                        call->rx_first_oos = ntohl(rxrpc_skb(skb)->hdr.seq);
                        _debug("requeue %p {%u}", skb, call->rx_first_oos);
                } else {
                        skb->mark = RXRPC_SKB_MARK_DATA;
                        terminal = ((sp->hdr.flags & RXRPC_LAST_PACKET) &&
                                    !(sp->hdr.flags & RXRPC_CLIENT_INITIATED));
                        ret = rxrpc_queue_rcv_skb(call, skb, true, terminal);
                        BUG_ON(ret < 0);
                        _debug("drain #%u", call->rx_data_post);
                        call->rx_data_post++;

                        /* find out what the next packet is */
                        skb = skb_peek(&call->rx_oos_queue);
                        if (skb)
                                call->rx_first_oos =
                                        ntohl(rxrpc_skb(skb)->hdr.seq);
                        else
                                call->rx_first_oos = 0;
                        _debug("peek %p {%u}", skb, call->rx_first_oos);
                }
        }

        ret = 0;
socket_unavailable:
        spin_unlock_bh(&call->lock);
        _leave(" = %d", ret);
        return ret;
}

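/*
 * The drain above is driven by two counters: rx_data_post is the sequence
 * number of the next packet to hand to the consumer, and rx_first_oos is the
 * lowest sequence number sitting in rx_oos_queue (0 if the queue is empty).
 * rxrpc_process_call() keeps calling the drain while the two are equal, i.e.
 * while the head of the out-of-sequence queue is exactly the packet the
 * consumer needs next.
 */
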
/*
 * insert an out of sequence packet into the buffer
 */
static void rxrpc_insert_oos_packet(struct rxrpc_call *call,
                                    struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp, *psp;
        struct sk_buff *p;
        u32 seq;

        sp = rxrpc_skb(skb);
        seq = ntohl(sp->hdr.seq);
        _enter(",,{%u}", seq);

        skb->destructor = rxrpc_packet_destructor;
        ASSERTCMP(sp->call, ==, NULL);
        sp->call = call;
        rxrpc_get_call(call);

        /* insert into the buffer in sequence order */
        spin_lock_bh(&call->lock);

        skb_queue_walk(&call->rx_oos_queue, p) {
                psp = rxrpc_skb(p);
                if (ntohl(psp->hdr.seq) > seq) {
                        _debug("insert oos #%u before #%u",
                               seq, ntohl(psp->hdr.seq));
                        skb_insert(p, skb, &call->rx_oos_queue);
                        goto inserted;
                }
        }

        _debug("append oos #%u", seq);
        skb_queue_tail(&call->rx_oos_queue, skb);
inserted:

        /* we might now have a new front to the queue */
        if (call->rx_first_oos == 0 || seq < call->rx_first_oos)
                call->rx_first_oos = seq;

        read_lock(&call->state_lock);
        if (call->state < RXRPC_CALL_COMPLETE &&
            call->rx_data_post == call->rx_first_oos) {
                _debug("drain rx oos now");
                set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
        }
        read_unlock(&call->state_lock);

        spin_unlock_bh(&call->lock);
        _leave(" [stored #%u]", call->rx_first_oos);
}

/*
 * clear the Tx window on final ACK reception
 */
static void rxrpc_zap_tx_window(struct rxrpc_call *call)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        unsigned long _skb, *acks_window;
        u8 winsz = call->acks_winsz;
        int tail;

        acks_window = call->acks_window;
        call->acks_window = NULL;

        while (CIRC_CNT(call->acks_head, call->acks_tail, winsz) > 0) {
                tail = call->acks_tail;
                smp_read_barrier_depends();
                _skb = acks_window[tail] & ~1;
                smp_mb();
                call->acks_tail = (call->acks_tail + 1) & (winsz - 1);

                skb = (struct sk_buff *) _skb;
                sp = rxrpc_skb(skb);
                _debug("+++ clear Tx %u", ntohl(sp->hdr.seq));
                rxrpc_free_skb(skb);
        }

        kfree(acks_window);
}

/*
 * process the extra information that may be appended to an ACK packet
 */
static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
                                  unsigned latest, int nAcks)
{
        struct rxrpc_ackinfo ackinfo;
        struct rxrpc_peer *peer;
        unsigned mtu;

        if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
                _leave(" [no ackinfo]");
                return;
        }

        _proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
               latest,
               ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU),
               ntohl(ackinfo.rwind), ntohl(ackinfo.jumbo_max));

        mtu = min(ntohl(ackinfo.rxMTU), ntohl(ackinfo.maxMTU));

        peer = call->conn->trans->peer;
        if (mtu < peer->maxdata) {
                spin_lock_bh(&peer->lock);
                peer->maxdata = mtu;
                peer->mtu = mtu + peer->hdrsize;
                spin_unlock_bh(&peer->lock);
                _net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
        }
}

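/*
 * A note on the nAcks + 3 offset above: by the time this is called, the
 * fixed-size ackpacket header has already been pulled off the skb, so what
 * remains is the array of nAcks soft-ACK/NACK bytes, then 3 bytes of padding
 * to a 4-byte boundary, then the ackinfo trailer.  The Tx side mirrors this
 * layout when it builds its own ACKs in rxrpc_process_call() (ack header,
 * soft-ACK bytes, 3-byte pad, ackinfo).
 */
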
/*
 * process packets in the reception queue
 */
static int rxrpc_process_rx_queue(struct rxrpc_call *call,
                                  u32 *_abort_code)
{
        struct rxrpc_ackpacket ack;
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        bool post_ACK;
        int latest;
        u32 hard, tx;

        _enter("");

process_further:
        skb = skb_dequeue(&call->rx_queue);
        if (!skb)
                return -EAGAIN;

        _net("deferred skb %p", skb);

        sp = rxrpc_skb(skb);

        _debug("process %s [st %d]", rxrpc_pkts[sp->hdr.type], call->state);

        post_ACK = false;

        switch (sp->hdr.type) {
                /* data packets that wind up here have been received out of
                 * order, need security processing or are jumbo packets */
        case RXRPC_PACKET_TYPE_DATA:
                _proto("OOSQ DATA %%%u { #%u }",
                       ntohl(sp->hdr.serial), ntohl(sp->hdr.seq));

                /* secured packets must be verified and possibly decrypted */
                if (rxrpc_verify_packet(call, skb, _abort_code) < 0)
                        goto protocol_error;

                rxrpc_insert_oos_packet(call, skb);
                goto process_further;

                /* partial ACK to process */
        case RXRPC_PACKET_TYPE_ACK:
                if (skb_copy_bits(skb, 0, &ack, sizeof(ack)) < 0) {
                        _debug("extraction failure");
                        goto protocol_error;
                }
                if (!skb_pull(skb, sizeof(ack)))
                        BUG();

                latest = ntohl(sp->hdr.serial);
                hard = ntohl(ack.firstPacket);
                tx = atomic_read(&call->sequence);

                _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
                       latest,
                       ntohs(ack.maxSkew),
                       hard,
                       ntohl(ack.previousPacket),
                       ntohl(ack.serial),
                       rxrpc_acks[ack.reason],
                       ack.nAcks);

                rxrpc_extract_ackinfo(call, skb, latest, ack.nAcks);

                if (ack.reason == RXRPC_ACK_PING) {
                        _proto("Rx ACK %%%u PING Request", latest);
                        rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
                                          sp->hdr.serial, true);
                }

                /* discard any out-of-order or duplicate ACKs */
                if (latest - call->acks_latest <= 0) {
                        _debug("discard ACK %d <= %d",
                               latest, call->acks_latest);
                        goto discard;
                }
                call->acks_latest = latest;

                if (call->state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
                    call->state != RXRPC_CALL_CLIENT_AWAIT_REPLY &&
                    call->state != RXRPC_CALL_SERVER_SEND_REPLY &&
                    call->state != RXRPC_CALL_SERVER_AWAIT_ACK)
                        goto discard;

                _debug("Tx=%d H=%u S=%d", tx, call->acks_hard, call->state);

                if (hard > 0) {
                        if (hard - 1 > tx) {
                                _debug("hard-ACK'd packet %d not transmitted"
                                       " (%d top)",
                                       hard - 1, tx);
                                goto protocol_error;
                        }

                        if ((call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY ||
                             call->state == RXRPC_CALL_SERVER_AWAIT_ACK) &&
                            hard > tx)
                                goto all_acked;

                        smp_rmb();
                        rxrpc_rotate_tx_window(call, hard - 1);
                }

                if (ack.nAcks > 0) {
                        if (hard - 1 + ack.nAcks > tx) {
                                _debug("soft-ACK'd packet %d+%d not"
                                       " transmitted (%d top)",
                                       hard - 1, ack.nAcks, tx);
                                goto protocol_error;
                        }

                        if (rxrpc_process_soft_ACKs(call, &ack, skb) < 0)
                                goto protocol_error;
                }
                goto discard;

                /* complete ACK to process */
        case RXRPC_PACKET_TYPE_ACKALL:
                goto all_acked;

                /* abort and busy are handled elsewhere */
        case RXRPC_PACKET_TYPE_BUSY:
        case RXRPC_PACKET_TYPE_ABORT:
                BUG();

                /* connection level events - also handled elsewhere */
        case RXRPC_PACKET_TYPE_CHALLENGE:
        case RXRPC_PACKET_TYPE_RESPONSE:
        case RXRPC_PACKET_TYPE_DEBUG:
                BUG();
        }

        /* if we've had a hard ACK that covers all the packets we've sent, then
         * that ends that phase of the operation */
all_acked:
        write_lock_bh(&call->state_lock);
        _debug("ack all %d", call->state);

        switch (call->state) {
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
                call->state = RXRPC_CALL_CLIENT_RECV_REPLY;
                break;
        case RXRPC_CALL_SERVER_AWAIT_ACK:
                _debug("srv complete");
                call->state = RXRPC_CALL_COMPLETE;
                post_ACK = true;
                break;
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
        case RXRPC_CALL_SERVER_RECV_REQUEST:
                goto protocol_error_unlock; /* can't occur yet */
        default:
                write_unlock_bh(&call->state_lock);
                goto discard; /* assume packet left over from earlier phase */
        }

        write_unlock_bh(&call->state_lock);

        /* if all the packets we sent are hard-ACK'd, then we can discard
         * whatever we've got left */
        _debug("clear Tx %d",
               CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));

        del_timer_sync(&call->resend_timer);
        clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
        clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);

        if (call->acks_window)
                rxrpc_zap_tx_window(call);

        if (post_ACK) {
                /* post the final ACK message for userspace to pick up */
                _debug("post ACK");
                skb->mark = RXRPC_SKB_MARK_FINAL_ACK;
                sp->call = call;
                rxrpc_get_call(call);
                spin_lock_bh(&call->lock);
                if (rxrpc_queue_rcv_skb(call, skb, true, true) < 0)
                        BUG();
                spin_unlock_bh(&call->lock);
                goto process_further;
        }

discard:
        rxrpc_free_skb(skb);
        goto process_further;

protocol_error_unlock:
        write_unlock_bh(&call->state_lock);
protocol_error:
        rxrpc_free_skb(skb);
        _leave(" = -EPROTO");
        return -EPROTO;
}

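/*
 * ACK bounds checking above, in sequence-number terms: firstPacket ("hard")
 * is the first packet the peer has NOT yet consumed, so hard - 1 is the
 * highest hard-ACK'd sequence number and must not exceed tx, the highest
 * sequence we have actually sent; similarly hard - 1 + nAcks bounds the
 * highest soft-ACK'd packet.  Anything beyond tx means the peer is ACKing
 * packets that were never transmitted, which is treated as a protocol error.
 */
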
/*
 * post a message to the socket Rx queue for recvmsg() to pick up
 */
static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
                              bool fatal)
{
        struct rxrpc_skb_priv *sp;
        struct sk_buff *skb;
        int ret;

        _enter("{%d,%lx},%u,%u,%d",
               call->debug_id, call->flags, mark, error, fatal);

        /* remove timers and things for fatal messages */
        if (fatal) {
                del_timer_sync(&call->resend_timer);
                del_timer_sync(&call->ack_timer);
                clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
        }

        if (mark != RXRPC_SKB_MARK_NEW_CALL &&
            !test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                _leave("[no userid]");
                return 0;
        }

        if (!test_bit(RXRPC_CALL_TERMINAL_MSG, &call->flags)) {
                skb = alloc_skb(0, GFP_NOFS);
                if (!skb)
                        return -ENOMEM;

                rxrpc_new_skb(skb);

                skb->mark = mark;

                sp = rxrpc_skb(skb);
                memset(sp, 0, sizeof(*sp));
                sp->error = error;
                sp->call = call;
                rxrpc_get_call(call);

                spin_lock_bh(&call->lock);
                ret = rxrpc_queue_rcv_skb(call, skb, true, fatal);
                spin_unlock_bh(&call->lock);
                BUG_ON(ret < 0);
        }

        return 0;
}

/*
 * handle background processing of incoming call packets and ACK / abort
 * generation
 */
void rxrpc_process_call(struct work_struct *work)
{
        struct rxrpc_call *call =
                container_of(work, struct rxrpc_call, processor);
        struct rxrpc_ackpacket ack;
        struct rxrpc_ackinfo ackinfo;
        struct rxrpc_header hdr;
        struct msghdr msg;
        struct kvec iov[5];
        unsigned long bits;
        __be32 data, pad;
        size_t len;
        int genbit, loop, nbit, ioc, ret, mtu;
        u32 abort_code = RX_PROTOCOL_ERROR;
        u8 *acks = NULL;

        //printk("\n--------------------\n");
        _enter("{%d,%s,%lx} [%lu]",
               call->debug_id, rxrpc_call_states[call->state], call->events,
               (jiffies - call->creation_jif) / (HZ / 10));

        if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
                _debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
                return;
        }

        /* there's a good chance we're going to have to send a message, so set
         * one up in advance */
        msg.msg_name = &call->conn->trans->peer->srx.transport.sin;
        msg.msg_namelen = sizeof(call->conn->trans->peer->srx.transport.sin);
        msg.msg_control = NULL;
        msg.msg_controllen = 0;
        msg.msg_flags = 0;

        hdr.epoch = call->conn->epoch;
        hdr.cid = call->cid;
        hdr.callNumber = call->call_id;
        hdr.seq = 0;
        hdr.type = RXRPC_PACKET_TYPE_ACK;
        hdr.flags = call->conn->out_clientflag;
        hdr.userStatus = 0;
        hdr.securityIndex = call->conn->security_ix;
        hdr._rsvd = 0;
        hdr.serviceId = call->conn->service_id;

        memset(iov, 0, sizeof(iov));
        iov[0].iov_base = &hdr;
        iov[0].iov_len = sizeof(hdr);

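        /* the header template above is reused for every packet sent from
         * this pass; only hdr.type and hdr.serial change per message, and
         * iov[1..4] are filled in below depending on what gets sent */
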
        /* deal with events of a final nature */
        if (test_bit(RXRPC_CALL_RELEASE, &call->events)) {
                rxrpc_release_call(call);
                clear_bit(RXRPC_CALL_RELEASE, &call->events);
        }

        if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) {
                int error;

                clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
                clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
                clear_bit(RXRPC_CALL_ABORT, &call->events);

                error = call->conn->trans->peer->net_error;
                _debug("post net error %d", error);

                if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
                                       error, true) < 0)
                        goto no_mem;
                clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
                goto kill_ACKs;
        }

        if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) {
                ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

                clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
                clear_bit(RXRPC_CALL_ABORT, &call->events);

                _debug("post conn abort");

                if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
                                       call->conn->error, true) < 0)
                        goto no_mem;
                clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
                goto kill_ACKs;
        }

        if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) {
                hdr.type = RXRPC_PACKET_TYPE_BUSY;
                genbit = RXRPC_CALL_REJECT_BUSY;
                goto send_message;
        }

        if (test_bit(RXRPC_CALL_ABORT, &call->events)) {
                ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);

                if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
                                       ECONNABORTED, true) < 0)
                        goto no_mem;
                hdr.type = RXRPC_PACKET_TYPE_ABORT;
                data = htonl(call->abort_code);
                iov[1].iov_base = &data;
                iov[1].iov_len = sizeof(data);
                genbit = RXRPC_CALL_ABORT;
                goto send_message;
        }

        if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) {
                genbit = RXRPC_CALL_ACK_FINAL;

                ack.bufferSpace = htons(8);
                ack.maxSkew = 0;
                ack.serial = 0;
                ack.reason = RXRPC_ACK_IDLE;
                ack.nAcks = 0;
                call->ackr_reason = 0;

                spin_lock_bh(&call->lock);
                ack.serial = call->ackr_serial;
                ack.previousPacket = call->ackr_prev_seq;
                ack.firstPacket = htonl(call->rx_data_eaten + 1);
                spin_unlock_bh(&call->lock);

                pad = 0;

                iov[1].iov_base = &ack;
                iov[1].iov_len = sizeof(ack);
                iov[2].iov_base = &pad;
                iov[2].iov_len = 3;
                iov[3].iov_base = &ackinfo;
                iov[3].iov_len = sizeof(ackinfo);
                goto send_ACK;
        }

        if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) |
                            (1 << RXRPC_CALL_RCVD_ABORT))
            ) {
                u32 mark;

                if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events))
                        mark = RXRPC_SKB_MARK_REMOTE_ABORT;
                else
                        mark = RXRPC_SKB_MARK_BUSY;

                _debug("post abort/busy");
                rxrpc_clear_tx_window(call);
                if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
                        goto no_mem;

                clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
                clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
                goto kill_ACKs;
        }

        if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) {
                _debug("do implicit ackall");
                rxrpc_clear_tx_window(call);
        }

        if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) {
                write_lock_bh(&call->state_lock);
                if (call->state <= RXRPC_CALL_COMPLETE) {
                        call->state = RXRPC_CALL_LOCALLY_ABORTED;
                        call->abort_code = RX_CALL_TIMEOUT;
                        set_bit(RXRPC_CALL_ABORT, &call->events);
                }
                write_unlock_bh(&call->state_lock);

                _debug("post timeout");
                if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
                                       ETIME, true) < 0)
                        goto no_mem;

                clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
                goto kill_ACKs;
        }

        /* deal with assorted inbound messages */
        if (!skb_queue_empty(&call->rx_queue)) {
                switch (rxrpc_process_rx_queue(call, &abort_code)) {
                case 0:
                case -EAGAIN:
                        break;
                case -ENOMEM:
                        goto no_mem;
                case -EKEYEXPIRED:
                case -EKEYREJECTED:
                case -EPROTO:
                        rxrpc_abort_call(call, abort_code);
                        goto kill_ACKs;
                }
        }

        /* handle resending */
        if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
                rxrpc_resend_timer(call);
        if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events))
                rxrpc_resend(call);

        /* consider sending an ordinary ACK */
        if (test_bit(RXRPC_CALL_ACK, &call->events)) {
                _debug("send ACK: window: %d - %d { %lx }",
                       call->rx_data_eaten, call->ackr_win_top,
                       call->ackr_window[0]);

                if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
                    call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
                        /* ACK by sending reply DATA packet in this state */
                        clear_bit(RXRPC_CALL_ACK, &call->events);
                        goto maybe_reschedule;
                }

                genbit = RXRPC_CALL_ACK;

                acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
                               GFP_NOFS);
                if (!acks)
                        goto no_mem;

                //hdr.flags = RXRPC_SLOW_START_OK;
                ack.bufferSpace = htons(8);
                ack.maxSkew = 0;
                ack.serial = 0;
                ack.reason = 0;

                spin_lock_bh(&call->lock);
                ack.reason = call->ackr_reason;
                ack.serial = call->ackr_serial;
                ack.previousPacket = call->ackr_prev_seq;
                ack.firstPacket = htonl(call->rx_data_eaten + 1);

                ack.nAcks = 0;
                for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
                        nbit = loop * BITS_PER_LONG;
                        for (bits = call->ackr_window[loop]; bits;
                             bits >>= 1) {
                                _debug("- l=%d n=%d b=%lx", loop, nbit, bits);
                                if (bits & 1) {
                                        acks[nbit] = RXRPC_ACK_TYPE_ACK;
                                        ack.nAcks = nbit + 1;
                                }
                                nbit++;
                        }
                }
                call->ackr_reason = 0;
                spin_unlock_bh(&call->lock);

                pad = 0;

                iov[1].iov_base = &ack;
                iov[1].iov_len = sizeof(ack);
                iov[2].iov_base = acks;
                iov[2].iov_len = ack.nAcks;
                iov[3].iov_base = &pad;
                iov[3].iov_len = 3;
                iov[4].iov_base = &ackinfo;
                iov[4].iov_len = sizeof(ackinfo);

                switch (ack.reason) {
                case RXRPC_ACK_REQUESTED:
                case RXRPC_ACK_DUPLICATE:
                case RXRPC_ACK_OUT_OF_SEQUENCE:
                case RXRPC_ACK_EXCEEDS_WINDOW:
                case RXRPC_ACK_NOSPACE:
                case RXRPC_ACK_PING:
                case RXRPC_ACK_PING_RESPONSE:
                        goto send_ACK_with_skew;
                case RXRPC_ACK_DELAY:
                case RXRPC_ACK_IDLE:
                        goto send_ACK;
                }
        }

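        /* A note on the soft-ACK buffer just built: ackr_window[] is a
         * bitmap of packets received beyond rx_data_eaten, so each set bit
         * becomes an RXRPC_ACK_TYPE_ACK byte and nAcks rises to cover the
         * highest set bit.  Because the buffer came from kzalloc(), any gap
         * below that point stays zero and so reads as a NACK on the wire. */
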
        /* handle completion of security negotiations on an incoming
         * connection */
        if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) {
                _debug("secured");
                spin_lock_bh(&call->lock);

                if (call->state == RXRPC_CALL_SERVER_SECURING) {
                        _debug("securing");
                        write_lock(&call->conn->lock);
                        if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
                            !test_bit(RXRPC_CALL_RELEASE, &call->events)) {
                                _debug("not released");
                                call->state = RXRPC_CALL_SERVER_ACCEPTING;
                                list_move_tail(&call->accept_link,
                                               &call->socket->acceptq);
                        }
                        write_unlock(&call->conn->lock);
                        read_lock(&call->state_lock);
                        if (call->state < RXRPC_CALL_COMPLETE)
                                set_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
                        read_unlock(&call->state_lock);
                }

                spin_unlock_bh(&call->lock);
                if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events))
                        goto maybe_reschedule;
        }

        /* post a notification of an acceptable connection to the app */
        if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) {
                _debug("post accept");
                if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
                                       0, false) < 0)
                        goto no_mem;
                clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
                goto maybe_reschedule;
        }

        /* handle incoming call acceptance */
        if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) {
                _debug("accepted");
                ASSERTCMP(call->rx_data_post, ==, 0);
                call->rx_data_post = 1;
                read_lock_bh(&call->state_lock);
                if (call->state < RXRPC_CALL_COMPLETE)
                        set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
                read_unlock_bh(&call->state_lock);
        }

        /* drain the out of sequence received packet queue into the packet Rx
         * queue */
        if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) {
                while (call->rx_data_post == call->rx_first_oos)
                        if (rxrpc_drain_rx_oos_queue(call) < 0)
                                break;
                goto maybe_reschedule;
        }

        /* other events may have been raised since we started checking */
        goto maybe_reschedule;

send_ACK_with_skew:
        ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
                            ntohl(ack.serial));

send_ACK:
        mtu = call->conn->trans->peer->if_mtu;
        mtu -= call->conn->trans->peer->hdrsize;
        ackinfo.maxMTU = htonl(mtu);
        ackinfo.rwind = htonl(32);

        /* permit the peer to send us jumbo packets if it wants to */
        ackinfo.rxMTU = htonl(5692);
        ackinfo.jumbo_max = htonl(4);

        hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
        _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
               ntohl(hdr.serial),
               ntohs(ack.maxSkew),
               ntohl(ack.firstPacket),
               ntohl(ack.previousPacket),
               ntohl(ack.serial),
               rxrpc_acks[ack.reason],
               ack.nAcks);

        del_timer_sync(&call->ack_timer);
        if (ack.nAcks > 0)
                set_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags);
        goto send_message_2;

send_message:
        _debug("send message");

        hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
        _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
send_message_2:
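        /* the iov slots were filled contiguously above, so both the iovec
         * count and the total length can be derived from the highest
         * non-empty slot */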
        len = iov[0].iov_len;
        ioc = 1;
        if (iov[4].iov_len) {
                ioc = 5;
                len += iov[4].iov_len;
                len += iov[3].iov_len;
                len += iov[2].iov_len;
                len += iov[1].iov_len;
        } else if (iov[3].iov_len) {
                ioc = 4;
                len += iov[3].iov_len;
                len += iov[2].iov_len;
                len += iov[1].iov_len;
        } else if (iov[2].iov_len) {
                ioc = 3;
                len += iov[2].iov_len;
                len += iov[1].iov_len;
        } else if (iov[1].iov_len) {
                ioc = 2;
                len += iov[1].iov_len;
        }

        ret = kernel_sendmsg(call->conn->trans->local->socket,
                             &msg, iov, ioc, len);
        if (ret < 0) {
                _debug("sendmsg failed: %d", ret);
                read_lock_bh(&call->state_lock);
                if (call->state < RXRPC_CALL_DEAD)
                        rxrpc_queue_call(call);
                read_unlock_bh(&call->state_lock);
                goto error;
        }

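        /* genbit records which event bit the message just transmitted was
         * servicing; it is only cleared now that kernel_sendmsg() has
         * succeeded, so a failed send leaves the event pending for retry */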
        switch (genbit) {
        case RXRPC_CALL_ABORT:
                clear_bit(genbit, &call->events);
                clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
                goto kill_ACKs;

        case RXRPC_CALL_ACK_FINAL:
                write_lock_bh(&call->state_lock);
                if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
                        call->state = RXRPC_CALL_COMPLETE;
                write_unlock_bh(&call->state_lock);
                goto kill_ACKs;

        default:
                clear_bit(genbit, &call->events);
                switch (call->state) {
                case RXRPC_CALL_CLIENT_AWAIT_REPLY:
                case RXRPC_CALL_CLIENT_RECV_REPLY:
                case RXRPC_CALL_SERVER_RECV_REQUEST:
                case RXRPC_CALL_SERVER_ACK_REQUEST:
                        _debug("start ACK timer");
                        rxrpc_propose_ACK(call, RXRPC_ACK_DELAY,
                                          call->ackr_serial, false);
                default:
                        break;
                }
                goto maybe_reschedule;
        }

kill_ACKs:
        del_timer_sync(&call->ack_timer);
        if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events))
                rxrpc_put_call(call);
        clear_bit(RXRPC_CALL_ACK, &call->events);

maybe_reschedule:
        if (call->events || !skb_queue_empty(&call->rx_queue)) {
                read_lock_bh(&call->state_lock);
                if (call->state < RXRPC_CALL_DEAD)
                        rxrpc_queue_call(call);
                read_unlock_bh(&call->state_lock);
        }

        /* don't leave aborted connections on the accept queue */
        if (call->state >= RXRPC_CALL_COMPLETE &&
            !list_empty(&call->accept_link)) {
                _debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
                       call, call->events, call->flags,
                       ntohl(call->conn->cid));

                read_lock_bh(&call->state_lock);
                if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
                    !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
                        rxrpc_queue_call(call);
                read_unlock_bh(&call->state_lock);
        }

error:
        clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
        kfree(acks);

        /* because we don't want two CPUs both processing the work item for one
         * call at the same time, we use a flag to note when it's busy; however
         * this means there's a race between clearing the flag and setting the
         * work pending bit and the work item being processed again */
        if (call->events && !work_pending(&call->processor)) {
                _debug("jumpstart %x", ntohl(call->conn->cid));
                rxrpc_queue_call(call);
        }

        _leave("");
        return;

no_mem:
        _debug("out of memory");
        goto maybe_reschedule;
}