tcp_sack.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903
  1. /*-
  2. * SPDX-License-Identifier: BSD-3-Clause
  3. *
  4. * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
  5. * The Regents of the University of California.
  6. * All rights reserved.
  7. *
  8. * Redistribution and use in source and binary forms, with or without
  9. * modification, are permitted provided that the following conditions
  10. * are met:
  11. * 1. Redistributions of source code must retain the above copyright
  12. * notice, this list of conditions and the following disclaimer.
  13. * 2. Redistributions in binary form must reproduce the above copyright
  14. * notice, this list of conditions and the following disclaimer in the
  15. * documentation and/or other materials provided with the distribution.
  16. * 3. Neither the name of the University nor the names of its contributors
  17. * may be used to endorse or promote products derived from this software
  18. * without specific prior written permission.
  19. *
  20. * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
  21. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  22. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  23. * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  24. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  25. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  26. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  27. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  28. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  29. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  30. * SUCH DAMAGE.
  31. *
  32. * @(#)tcp_sack.c 8.12 (Berkeley) 5/24/95
  33. */
  34. /*-
  35. * @@(#)COPYRIGHT 1.1 (NRL) 17 January 1995
  36. *
  37. * NRL grants permission for redistribution and use in source and binary
  38. * forms, with or without modification, of the software and documentation
  39. * created at NRL provided that the following conditions are met:
  40. *
  41. * 1. Redistributions of source code must retain the above copyright
  42. * notice, this list of conditions and the following disclaimer.
  43. * 2. Redistributions in binary form must reproduce the above copyright
  44. * notice, this list of conditions and the following disclaimer in the
  45. * documentation and/or other materials provided with the distribution.
  46. * 3. All advertising materials mentioning features or use of this software
  47. * must display the following acknowledgements:
  48. * This product includes software developed by the University of
  49. * California, Berkeley and its contributors.
  50. * This product includes software developed at the Information
  51. * Technology Division, US Naval Research Laboratory.
  52. * 4. Neither the name of the NRL nor the names of its contributors
  53. * may be used to endorse or promote products derived from this software
  54. * without specific prior written permission.
  55. *
  56. * THE SOFTWARE PROVIDED BY NRL IS PROVIDED BY NRL AND CONTRIBUTORS ``AS
  57. * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
  58. * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
  59. * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NRL OR
  60. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  61. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  62. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  63. * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  64. * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  65. * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  66. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  67. *
  68. * The views and conclusions contained in the software and documentation
  69. * are those of the authors and should not be interpreted as representing
  70. * official policies, either expressed or implied, of the US Naval
  71. * Research Laboratory (NRL).
  72. */
  73. #include <sys/cdefs.h>
  74. __FBSDID("$FreeBSD$");
  75. #include "opt_inet.h"
  76. #include "opt_inet6.h"
  77. #include "opt_tcpdebug.h"
  78. #include <sys/param.h>
  79. #include <sys/systm.h>
  80. #include <sys/kernel.h>
  81. #include <sys/sysctl.h>
  82. #include <sys/malloc.h>
  83. #include <sys/mbuf.h>
  84. #include <sys/proc.h> /* for proc0 declaration */
  85. #include <sys/protosw.h>
  86. #include <sys/socket.h>
  87. #include <sys/socketvar.h>
  88. #include <sys/syslog.h>
  89. #include <sys/systm.h>
  90. #include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */
  91. #include <vm/uma.h>
  92. #include <net/if.h>
  93. #include <net/if_var.h>
  94. #include <net/route.h>
  95. #include <net/vnet.h>
  96. #include <netinet/in.h>
  97. #include <netinet/in_systm.h>
  98. #include <netinet/ip.h>
  99. #include <netinet/in_var.h>
  100. #include <netinet/in_pcb.h>
  101. #include <netinet/ip_var.h>
  102. #include <netinet/ip6.h>
  103. #include <netinet/icmp6.h>
  104. #include <netinet6/nd6.h>
  105. #include <netinet6/ip6_var.h>
  106. #include <netinet6/in6_pcb.h>
  107. #include <netinet/tcp.h>
  108. #include <netinet/tcp_fsm.h>
  109. #include <netinet/tcp_seq.h>
  110. #include <netinet/tcp_timer.h>
  111. #include <netinet/tcp_var.h>
  112. #include <netinet6/tcp6_var.h>
  113. #include <netinet/tcpip.h>
  114. #ifdef TCPDEBUG
  115. #include <netinet/tcp_debug.h>
  116. #endif /* TCPDEBUG */
  117. #include <machine/in_cksum.h>
  118. VNET_DECLARE(struct uma_zone *, sack_hole_zone);
  119. #define V_sack_hole_zone VNET(sack_hole_zone)
  120. SYSCTL_NODE(_net_inet_tcp, OID_AUTO, sack, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
  121. "TCP SACK");
  122. VNET_DEFINE(int, tcp_do_sack) = 1;
  123. #define V_tcp_do_sack VNET(tcp_do_sack)
  124. SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
  125. &VNET_NAME(tcp_do_sack), 0, "Enable/Disable TCP SACK support");
  126. VNET_DEFINE(int, tcp_sack_maxholes) = 128;
  127. SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, maxholes, CTLFLAG_VNET | CTLFLAG_RW,
  128. &VNET_NAME(tcp_sack_maxholes), 0,
  129. "Maximum number of TCP SACK holes allowed per connection");
  130. VNET_DEFINE(int, tcp_sack_globalmaxholes) = 65536;
  131. SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalmaxholes, CTLFLAG_VNET | CTLFLAG_RW,
  132. &VNET_NAME(tcp_sack_globalmaxholes), 0,
  133. "Global maximum number of TCP SACK holes");
  134. VNET_DEFINE(int, tcp_sack_globalholes) = 0;
  135. SYSCTL_INT(_net_inet_tcp_sack, OID_AUTO, globalholes, CTLFLAG_VNET | CTLFLAG_RD,
  136. &VNET_NAME(tcp_sack_globalholes), 0,
  137. "Global number of TCP SACK holes currently allocated");
/*
 * This function will find overlaps with the currently stored sackblocks
 * and add any overlap as a dsack block upfront
 *
 * [rcv_start, rcv_end) is the sequence range of the segment that just
 * arrived.  On return, tp->sackblks[0] holds the D-SACK block (if any)
 * and the remaining entries hold the surviving normal SACK blocks.
 */
void
tcp_update_dsack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
{
	struct sackblk head_blk,mid_blk,saved_blks[MAX_SACK_BLKS];
	int i, j, n, identical;
	tcp_seq start, end;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(SEQ_LT(rcv_start, rcv_end), ("rcv_start < rcv_end"));
	/*
	 * A segment entirely below rcv_nxt (or abutting it while the
	 * first stored block already ends at rcv_nxt) is a pure duplicate:
	 * report it verbatim as the D-SACK block.  Otherwise start with an
	 * empty (0,0) placeholder in saved_blks[0].
	 */
	if (SEQ_LT(rcv_end, tp->rcv_nxt) ||
	    ((rcv_end == tp->rcv_nxt) &&
	    (tp->rcv_numsacks > 0 ) &&
	    (tp->sackblks[0].end == tp->rcv_nxt))) {
		saved_blks[0].start = rcv_start;
		saved_blks[0].end = rcv_end;
	} else {
		saved_blks[0].start = saved_blks[0].end = 0;
	}
	/*
	 * head_blk accumulates the best overlap found between the new
	 * segment and the stored blocks; mid_blk grows to the union of the
	 * segment with every block it touches.
	 */
	head_blk.start = head_blk.end = 0;
	mid_blk.start = rcv_start;
	mid_blk.end = rcv_end;
	identical = 0;
	for (i = 0; i < tp->rcv_numsacks; i++) {
		start = tp->sackblks[i].start;
		end = tp->sackblks[i].end;
		if (SEQ_LT(rcv_end, start)) {
			/* pkt left to sack blk */
			continue;
		}
		if (SEQ_GT(rcv_start, end)) {
			/* pkt right to sack blk */
			continue;
		}
		if (SEQ_GT(tp->rcv_nxt, end)) {
			/*
			 * Stored block is already fully ACKed; remember the
			 * lowest non-empty overlap with it as a D-SACK
			 * candidate, then move on.
			 */
			if ((SEQ_MAX(rcv_start, start) != SEQ_MIN(rcv_end, end)) &&
			    (SEQ_GT(head_blk.start, SEQ_MAX(rcv_start, start)) ||
			    (head_blk.start == head_blk.end))) {
				head_blk.start = SEQ_MAX(rcv_start, start);
				head_blk.end = SEQ_MIN(rcv_end, end);
			}
			continue;
		}
		/*
		 * Overlap with a still-outstanding block: keep the lowest
		 * such block in head_blk.
		 */
		if (((head_blk.start == head_blk.end) ||
		    SEQ_LT(start, head_blk.start)) &&
		    (SEQ_GT(end, rcv_start) &&
		    SEQ_LEQ(start, rcv_end))) {
			head_blk.start = start;
			head_blk.end = end;
		}
		/* Grow mid_blk to cover this block as well. */
		mid_blk.start = SEQ_MIN(mid_blk.start, start);
		mid_blk.end = SEQ_MAX(mid_blk.end, end);
		if ((mid_blk.start == start) &&
		    (mid_blk.end == end))
			identical = 1;
	}
	if (SEQ_LT(head_blk.start, head_blk.end)) {
		/* store overlapping range */
		saved_blks[0].start = SEQ_MAX(rcv_start, head_blk.start);
		saved_blks[0].end = SEQ_MIN(rcv_end, head_blk.end);
	}
	n = 1;
	/*
	 * Second, if not ACKed, store the SACK block that
	 * overlaps with the DSACK block unless it is identical
	 */
	if ((SEQ_LT(tp->rcv_nxt, mid_blk.end) &&
	    !((mid_blk.start == saved_blks[0].start) &&
	    (mid_blk.end == saved_blks[0].end))) ||
	    identical == 1) {
		saved_blks[n].start = mid_blk.start;
		saved_blks[n++].end = mid_blk.end;
	}
	/*
	 * Carry over every stored block that neither overlaps mid_blk nor
	 * has been overtaken by rcv_nxt, up to the list capacity.
	 */
	for (j = 0; (j < tp->rcv_numsacks) && (n < MAX_SACK_BLKS); j++) {
		if (((SEQ_LT(tp->sackblks[j].end, mid_blk.start) ||
		    SEQ_GT(tp->sackblks[j].start, mid_blk.end)) &&
		    (SEQ_GT(tp->sackblks[j].start, tp->rcv_nxt))))
			saved_blks[n++] = tp->sackblks[j];
	}
	j = 0;
	for (i = 0; i < n; i++) {
		/* we can end up with a stale initial entry */
		if (SEQ_LT(saved_blks[i].start, saved_blks[i].end)) {
			tp->sackblks[j++] = saved_blks[i];
		}
	}
	tp->rcv_numsacks = j;
}
/*
 * This function is called upon receipt of new valid data (while not in
 * header prediction mode), and it updates the ordered list of sacks.
 */
void
tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
{
	/*
	 * First reported block MUST be the most recent one. Subsequent
	 * blocks SHOULD be in the order in which they arrived at the
	 * receiver. These two conditions make the implementation fully
	 * compliant with RFC 2018.
	 */
	struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
	int num_head, num_saved, i;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* Check arguments. */
	KASSERT(SEQ_LEQ(rcv_start, rcv_end), ("rcv_start <= rcv_end"));

	if ((rcv_start == rcv_end) &&
	    (tp->rcv_numsacks >= 1) &&
	    (rcv_end == tp->sackblks[0].end)) {
		/* retaining DSACK block below rcv_nxt (todrop) */
		head_blk = tp->sackblks[0];
	} else {
		/* SACK block for the received segment. */
		head_blk.start = rcv_start;
		head_blk.end = rcv_end;
	}
	/*
	 * Merge updated SACK blocks into head_blk, and save unchanged SACK
	 * blocks into saved_blks[]. num_saved will have the number of the
	 * saved SACK blocks.
	 */
	num_saved = 0;
	for (i = 0; i < tp->rcv_numsacks; i++) {
		tcp_seq start = tp->sackblks[i].start;
		tcp_seq end = tp->sackblks[i].end;
		if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
			/*
			 * Discard this SACK block.
			 */
		} else if (SEQ_LEQ(head_blk.start, end) &&
		    SEQ_GEQ(head_blk.end, start)) {
			/*
			 * Merge this SACK block into head_blk. This SACK
			 * block itself will be discarded.
			 */
			/*
			 * |-|
			 * |---| merge
			 *
			 * |-|
			 * |---| merge
			 *
			 * |-----|
			 * |-| DSACK smaller
			 *
			 * |-|
			 * |-----| DSACK smaller
			 */
			if (head_blk.start == end)
				head_blk.start = start;
			else if (head_blk.end == start)
				head_blk.end = end;
			else {
				/*
				 * Partial overlap: keep the smaller block in
				 * head_blk (the D-SACK candidate) and save
				 * the larger one by swapping bounds so that
				 * (start, end) ends up being the wider range.
				 */
				if (SEQ_LT(head_blk.start, start)) {
					tcp_seq temp = start;
					start = head_blk.start;
					head_blk.start = temp;
				}
				if (SEQ_GT(head_blk.end, end)) {
					tcp_seq temp = end;
					end = head_blk.end;
					head_blk.end = temp;
				}
				if ((head_blk.start != start) ||
				    (head_blk.end != end)) {
					/*
					 * Drop the previously saved block if
					 * this wider range contains it.
					 */
					if ((num_saved >= 1) &&
					    SEQ_GEQ(saved_blks[num_saved-1].start, start) &&
					    SEQ_LEQ(saved_blks[num_saved-1].end, end))
						num_saved--;
					saved_blks[num_saved].start = start;
					saved_blks[num_saved].end = end;
					num_saved++;
				}
			}
		} else {
			/*
			 * This block supercedes the prior block
			 */
			if ((num_saved >= 1) &&
			    SEQ_GEQ(saved_blks[num_saved-1].start, start) &&
			    SEQ_LEQ(saved_blks[num_saved-1].end, end))
				num_saved--;
			/*
			 * Save this SACK block.
			 */
			saved_blks[num_saved].start = start;
			saved_blks[num_saved].end = end;
			num_saved++;
		}
	}
	/*
	 * Update SACK list in tp->sackblks[].
	 */
	num_head = 0;
	if (SEQ_LT(rcv_start, rcv_end)) {
		/*
		 * The received data segment is an out-of-order segment. Put
		 * head_blk at the top of SACK list.
		 */
		tp->sackblks[0] = head_blk;
		num_head = 1;
		/*
		 * If the number of saved SACK blocks exceeds its limit,
		 * discard the last SACK block.
		 */
		if (num_saved >= MAX_SACK_BLKS)
			num_saved--;
	}
	/* Keep a retained D-SACK block at the head of the list. */
	if ((rcv_start == rcv_end) &&
	    (rcv_start == tp->sackblks[0].end)) {
		num_head = 1;
	}
	if (num_saved > 0) {
		/*
		 * Copy the saved SACK blocks back.
		 */
		bcopy(saved_blks, &tp->sackblks[num_head],
		      sizeof(struct sackblk) * num_saved);
	}

	/* Save the number of SACK blocks. */
	tp->rcv_numsacks = num_head + num_saved;
}
  362. void
  363. tcp_clean_dsack_blocks(struct tcpcb *tp)
  364. {
  365. struct sackblk saved_blks[MAX_SACK_BLKS];
  366. int num_saved, i;
  367. INP_WLOCK_ASSERT(tp->t_inpcb);
  368. /*
  369. * Clean up any DSACK blocks that
  370. * are in our queue of sack blocks.
  371. *
  372. */
  373. num_saved = 0;
  374. for (i = 0; i < tp->rcv_numsacks; i++) {
  375. tcp_seq start = tp->sackblks[i].start;
  376. tcp_seq end = tp->sackblks[i].end;
  377. if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
  378. /*
  379. * Discard this D-SACK block.
  380. */
  381. continue;
  382. }
  383. /*
  384. * Save this SACK block.
  385. */
  386. saved_blks[num_saved].start = start;
  387. saved_blks[num_saved].end = end;
  388. num_saved++;
  389. }
  390. if (num_saved > 0) {
  391. /*
  392. * Copy the saved SACK blocks back.
  393. */
  394. bcopy(saved_blks, &tp->sackblks[0],
  395. sizeof(struct sackblk) * num_saved);
  396. }
  397. tp->rcv_numsacks = num_saved;
  398. }
  399. /*
  400. * Delete all receiver-side SACK information.
  401. */
  402. void
  403. tcp_clean_sackreport(struct tcpcb *tp)
  404. {
  405. int i;
  406. INP_WLOCK_ASSERT(tp->t_inpcb);
  407. tp->rcv_numsacks = 0;
  408. for (i = 0; i < MAX_SACK_BLKS; i++)
  409. tp->sackblks[i].start = tp->sackblks[i].end=0;
  410. }
  411. /*
  412. * Allocate struct sackhole.
  413. */
  414. static struct sackhole *
  415. tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end)
  416. {
  417. struct sackhole *hole;
  418. if (tp->snd_numholes >= V_tcp_sack_maxholes ||
  419. V_tcp_sack_globalholes >= V_tcp_sack_globalmaxholes) {
  420. TCPSTAT_INC(tcps_sack_sboverflow);
  421. return NULL;
  422. }
  423. hole = (struct sackhole *)uma_zalloc(V_sack_hole_zone, M_NOWAIT);
  424. if (hole == NULL)
  425. return NULL;
  426. hole->start = start;
  427. hole->end = end;
  428. hole->rxmit = start;
  429. tp->snd_numholes++;
  430. atomic_add_int(&V_tcp_sack_globalholes, 1);
  431. return hole;
  432. }
  433. /*
  434. * Free struct sackhole.
  435. */
  436. static void
  437. tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
  438. {
  439. uma_zfree(V_sack_hole_zone, hole);
  440. tp->snd_numholes--;
  441. atomic_subtract_int(&V_tcp_sack_globalholes, 1);
  442. KASSERT(tp->snd_numholes >= 0, ("tp->snd_numholes >= 0"));
  443. KASSERT(V_tcp_sack_globalholes >= 0, ("tcp_sack_globalholes >= 0"));
  444. }
  445. /*
  446. * Insert new SACK hole into scoreboard.
  447. */
  448. static struct sackhole *
  449. tcp_sackhole_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end,
  450. struct sackhole *after)
  451. {
  452. struct sackhole *hole;
  453. /* Allocate a new SACK hole. */
  454. hole = tcp_sackhole_alloc(tp, start, end);
  455. if (hole == NULL)
  456. return NULL;
  457. /* Insert the new SACK hole into scoreboard. */
  458. if (after != NULL)
  459. TAILQ_INSERT_AFTER(&tp->snd_holes, after, hole, scblink);
  460. else
  461. TAILQ_INSERT_TAIL(&tp->snd_holes, hole, scblink);
  462. /* Update SACK hint. */
  463. if (tp->sackhint.nexthole == NULL)
  464. tp->sackhint.nexthole = hole;
  465. return hole;
  466. }
  467. /*
  468. * Remove SACK hole from scoreboard.
  469. */
  470. static void
  471. tcp_sackhole_remove(struct tcpcb *tp, struct sackhole *hole)
  472. {
  473. /* Update SACK hint. */
  474. if (tp->sackhint.nexthole == hole)
  475. tp->sackhint.nexthole = TAILQ_NEXT(hole, scblink);
  476. /* Remove this SACK hole. */
  477. TAILQ_REMOVE(&tp->snd_holes, hole, scblink);
  478. /* Free this SACK hole. */
  479. tcp_sackhole_free(tp, hole);
  480. }
/*
 * Process cumulative ACK and the TCP SACK option to update the scoreboard.
 * tp->snd_holes is an ordered list of holes (oldest to newest, in terms of
 * the sequence space).
 * Returns 1 if incoming ACK has previously unknown SACK information,
 * 0 otherwise.
 */
int
tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, tcp_seq th_ack)
{
	struct sackhole *cur, *temp;
	struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1], *sblkp;
	int i, j, num_sack_blks, sack_changed;
	int delivered_data, left_edge_delta;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	num_sack_blks = 0;
	sack_changed = 0;
	delivered_data = 0;
	left_edge_delta = 0;
	/*
	 * If SND.UNA will be advanced by SEG.ACK, and if SACK holes exist,
	 * treat [SND.UNA, SEG.ACK) as if it is a SACK block.
	 * Account changes to SND.UNA always in delivered data.
	 */
	if (SEQ_LT(tp->snd_una, th_ack) && !TAILQ_EMPTY(&tp->snd_holes)) {
		left_edge_delta = th_ack - tp->snd_una;
		sack_blocks[num_sack_blks].start = tp->snd_una;
		sack_blocks[num_sack_blks++].end = th_ack;
	}
	/*
	 * Append received valid SACK blocks to sack_blocks[], but only if we
	 * received new blocks from the other side.
	 */
	if (to->to_flags & TOF_SACK) {
		for (i = 0; i < to->to_nsacks; i++) {
			/* Option data may be unaligned; copy before use. */
			bcopy((to->to_sacks + i * TCPOLEN_SACK),
			    &sack, sizeof(sack));
			sack.start = ntohl(sack.start);
			sack.end = ntohl(sack.end);
			/*
			 * Accept only well-formed blocks that lie strictly
			 * above both snd_una and th_ack and within snd_max.
			 */
			if (SEQ_GT(sack.end, sack.start) &&
			    SEQ_GT(sack.start, tp->snd_una) &&
			    SEQ_GT(sack.start, th_ack) &&
			    SEQ_LT(sack.start, tp->snd_max) &&
			    SEQ_GT(sack.end, tp->snd_una) &&
			    SEQ_LEQ(sack.end, tp->snd_max)) {
				sack_blocks[num_sack_blks++] = sack;
			}
		}
	}
	/*
	 * Return if SND.UNA is not advanced and no valid SACK block is
	 * received.
	 */
	if (num_sack_blks == 0)
		return (sack_changed);

	/*
	 * Sort the SACK blocks so we can update the scoreboard with just one
	 * pass. The overhead of sorting up to 4+1 elements is less than
	 * making up to 4+1 passes over the scoreboard.
	 */
	for (i = 0; i < num_sack_blks; i++) {
		for (j = i + 1; j < num_sack_blks; j++) {
			if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
				sack = sack_blocks[i];
				sack_blocks[i] = sack_blocks[j];
				sack_blocks[j] = sack;
			}
		}
	}
	if (TAILQ_EMPTY(&tp->snd_holes)) {
		/*
		 * Empty scoreboard. Need to initialize snd_fack (it may be
		 * uninitialized or have a bogus value). Scoreboard holes
		 * (from the sack blocks received) are created later below
		 * (in the logic that adds holes to the tail of the
		 * scoreboard).
		 */
		tp->snd_fack = SEQ_MAX(tp->snd_una, th_ack);
		tp->sackhint.sacked_bytes = 0;	/* reset */
	}
	/*
	 * In the while-loop below, incoming SACK blocks (sack_blocks[]) and
	 * SACK holes (snd_holes) are traversed from their tails with just
	 * one pass in order to reduce the number of compares especially when
	 * the bandwidth-delay product is large.
	 *
	 * Note: Typically, in the first RTT of SACK recovery, the highest
	 * three or four SACK blocks with the same ack number are received.
	 * In the second RTT, if retransmitted data segments are not lost,
	 * the highest three or four SACK blocks with ack number advancing
	 * are received.
	 */
	sblkp = &sack_blocks[num_sack_blks - 1];	/* Last SACK block */
	tp->sackhint.last_sack_ack = sblkp->end;
	if (SEQ_LT(tp->snd_fack, sblkp->start)) {
		/*
		 * The highest SACK block is beyond fack. Append new SACK
		 * hole at the tail. If the second or later highest SACK
		 * blocks are also beyond the current fack, they will be
		 * inserted by way of hole splitting in the while-loop below.
		 */
		temp = tcp_sackhole_insert(tp, tp->snd_fack,sblkp->start,NULL);
		if (temp != NULL) {
			delivered_data += sblkp->end - sblkp->start;
			tp->snd_fack = sblkp->end;
			/* Go to the previous sack block. */
			sblkp--;
			sack_changed = 1;
		} else {
			/*
			 * We failed to add a new hole based on the current
			 * sack block. Skip over all the sack blocks that
			 * fall completely to the right of snd_fack and
			 * proceed to trim the scoreboard based on the
			 * remaining sack blocks. This also trims the
			 * scoreboard for th_ack (which is sack_blocks[0]).
			 */
			while (sblkp >= sack_blocks &&
			       SEQ_LT(tp->snd_fack, sblkp->start))
				sblkp--;
			if (sblkp >= sack_blocks &&
			    SEQ_LT(tp->snd_fack, sblkp->end)) {
				delivered_data += sblkp->end - tp->snd_fack;
				tp->snd_fack = sblkp->end;
				sack_changed = 1;
			}
		}
	} else if (SEQ_LT(tp->snd_fack, sblkp->end)) {
		/* fack is advanced. */
		delivered_data += sblkp->end - tp->snd_fack;
		tp->snd_fack = sblkp->end;
		sack_changed = 1;
	}
	cur = TAILQ_LAST(&tp->snd_holes, sackhole_head); /* Last SACK hole. */
	/*
	 * Since the incoming sack blocks are sorted, we can process them
	 * making one sweep of the scoreboard.
	 */
	while (sblkp >= sack_blocks  && cur != NULL) {
		if (SEQ_GEQ(sblkp->start, cur->end)) {
			/*
			 * SACKs data beyond the current hole. Go to the
			 * previous sack block.
			 */
			sblkp--;
			continue;
		}
		if (SEQ_LEQ(sblkp->end, cur->start)) {
			/*
			 * SACKs data before the current hole. Go to the
			 * previous hole.
			 */
			cur = TAILQ_PREV(cur, sackhole_head, scblink);
			continue;
		}
		/*
		 * The block overlaps this hole.  Temporarily subtract this
		 * hole's retransmitted bytes from the hint; they are added
		 * back (for whatever remains of the hole) below.
		 */
		tp->sackhint.sack_bytes_rexmit -= (cur->rxmit - cur->start);
		KASSERT(tp->sackhint.sack_bytes_rexmit >= 0,
		    ("sackhint bytes rtx >= 0"));
		sack_changed = 1;
		if (SEQ_LEQ(sblkp->start, cur->start)) {
			/* Data acks at least the beginning of hole. */
			if (SEQ_GEQ(sblkp->end, cur->end)) {
				/* Acks entire hole, so delete hole. */
				delivered_data += (cur->end - cur->start);
				temp = cur;
				cur = TAILQ_PREV(cur, sackhole_head, scblink);
				tcp_sackhole_remove(tp, temp);
				/*
				 * The sack block may ack all or part of the
				 * next hole too, so continue onto the next
				 * hole.
				 */
				continue;
			} else {
				/* Move start of hole forward. */
				delivered_data += (sblkp->end - cur->start);
				cur->start = sblkp->end;
				cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
			}
		} else {
			/* Data acks at least the end of hole. */
			if (SEQ_GEQ(sblkp->end, cur->end)) {
				/* Move end of hole backward. */
				delivered_data += (cur->end - sblkp->start);
				cur->end = sblkp->start;
				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
			} else {
				/*
				 * ACKs some data in middle of a hole; need
				 * to split current hole
				 */
				temp = tcp_sackhole_insert(tp, sblkp->end,
				    cur->end, cur);
				if (temp != NULL) {
					if (SEQ_GT(cur->rxmit, temp->rxmit)) {
						temp->rxmit = cur->rxmit;
						tp->sackhint.sack_bytes_rexmit
						    += (temp->rxmit
						    - temp->start);
					}
					cur->end = sblkp->start;
					cur->rxmit = SEQ_MIN(cur->rxmit,
					    cur->end);
					delivered_data += (sblkp->end - sblkp->start);
				}
			}
		}
		/* Re-account the (possibly shrunk) hole's rexmit bytes. */
		tp->sackhint.sack_bytes_rexmit += (cur->rxmit - cur->start);
		/*
		 * Testing sblkp->start against cur->start tells us whether
		 * we're done with the sack block or the sack hole.
		 * Accordingly, we advance one or the other.
		 */
		if (SEQ_LEQ(sblkp->start, cur->start))
			cur = TAILQ_PREV(cur, sackhole_head, scblink);
		else
			sblkp--;
	}
	tp->sackhint.delivered_data = delivered_data;
	tp->sackhint.sacked_bytes += delivered_data - left_edge_delta;
	KASSERT((delivered_data >= 0), ("delivered_data < 0"));
	KASSERT((tp->sackhint.sacked_bytes >= 0), ("sacked_bytes < 0"));
	return (sack_changed);
}
  705. /*
  706. * Free all SACK holes to clear the scoreboard.
  707. */
  708. void
  709. tcp_free_sackholes(struct tcpcb *tp)
  710. {
  711. struct sackhole *q;
  712. INP_WLOCK_ASSERT(tp->t_inpcb);
  713. while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL)
  714. tcp_sackhole_remove(tp, q);
  715. tp->sackhint.sack_bytes_rexmit = 0;
  716. KASSERT(tp->snd_numholes == 0, ("tp->snd_numholes == 0"));
  717. KASSERT(tp->sackhint.nexthole == NULL,
  718. ("tp->sackhint.nexthole == NULL"));
  719. }
  720. /*
  721. * Partial ack handling within a sack recovery episode. Keeping this very
  722. * simple for now. When a partial ack is received, force snd_cwnd to a value
  723. * that will allow the sender to transmit no more than 2 segments. If
  724. * necessary, a better scheme can be adopted at a later point, but for now,
  725. * the goal is to prevent the sender from bursting a large amount of data in
  726. * the midst of sack recovery.
  727. */
  728. void
  729. tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
  730. {
  731. int num_segs = 1;
  732. u_int maxseg = tcp_maxseg(tp);
  733. INP_WLOCK_ASSERT(tp->t_inpcb);
  734. tcp_timer_activate(tp, TT_REXMT, 0);
  735. tp->t_rtttime = 0;
  736. /* Send one or 2 segments based on how much new data was acked. */
  737. if ((BYTES_THIS_ACK(tp, th) / maxseg) >= 2)
  738. num_segs = 2;
  739. tp->snd_cwnd = (tp->sackhint.sack_bytes_rexmit +
  740. (tp->snd_nxt - tp->snd_recover) + num_segs * maxseg);
  741. if (tp->snd_cwnd > tp->snd_ssthresh)
  742. tp->snd_cwnd = tp->snd_ssthresh;
  743. tp->t_flags |= TF_ACKNOW;
  744. (void) tp->t_fb->tfb_tcp_output(tp);
  745. }
#if 0
/*
 * Debug version of tcp_sack_output() that walks the scoreboard. Used for
 * now to sanity check the hint.
 *
 * Returns the first hole with unretransmitted data, accumulating into
 * *sack_bytes_rexmt the retransmitted byte counts of the holes walked
 * (skipping stale holes whose rxmit fell behind snd_una).
 */
static struct sackhole *
tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt)
{
	struct sackhole *p;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	*sack_bytes_rexmt = 0;
	TAILQ_FOREACH(p, &tp->snd_holes, scblink) {
		if (SEQ_LT(p->rxmit, p->end)) {
			if (SEQ_LT(p->rxmit, tp->snd_una)) {/* old SACK hole */
				continue;
			}
			*sack_bytes_rexmt += (p->rxmit - p->start);
			break;
		}
		*sack_bytes_rexmt += (p->rxmit - p->start);
	}
	return (p);
}
#endif
  770. /*
  771. * Returns the next hole to retransmit and the number of retransmitted bytes
  772. * from the scoreboard. We store both the next hole and the number of
  773. * retransmitted bytes as hints (and recompute these on the fly upon SACK/ACK
  774. * reception). This avoids scoreboard traversals completely.
  775. *
  776. * The loop here will traverse *at most* one link. Here's the argument. For
  777. * the loop to traverse more than 1 link before finding the next hole to
  778. * retransmit, we would need to have at least 1 node following the current
  779. * hint with (rxmit == end). But, for all holes following the current hint,
  780. * (start == rxmit), since we have not yet retransmitted from them.
  781. * Therefore, in order to traverse more 1 link in the loop below, we need to
  782. * have at least one node following the current hint with (start == rxmit ==
  783. * end). But that can't happen, (start == end) means that all the data in
  784. * that hole has been sacked, in which case, the hole would have been removed
  785. * from the scoreboard.
  786. */
  787. struct sackhole *
  788. tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
  789. {
  790. struct sackhole *hole = NULL;
  791. INP_WLOCK_ASSERT(tp->t_inpcb);
  792. *sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit;
  793. hole = tp->sackhint.nexthole;
  794. if (hole == NULL || SEQ_LT(hole->rxmit, hole->end))
  795. goto out;
  796. while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) {
  797. if (SEQ_LT(hole->rxmit, hole->end)) {
  798. tp->sackhint.nexthole = hole;
  799. break;
  800. }
  801. }
  802. out:
  803. return (hole);
  804. }
  805. /*
  806. * After a timeout, the SACK list may be rebuilt. This SACK information
  807. * should be used to avoid retransmitting SACKed data. This function
  808. * traverses the SACK list to see if snd_nxt should be moved forward.
  809. */
  810. void
  811. tcp_sack_adjust(struct tcpcb *tp)
  812. {
  813. struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes);
  814. INP_WLOCK_ASSERT(tp->t_inpcb);
  815. if (cur == NULL)
  816. return; /* No holes */
  817. if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack))
  818. return; /* We're already beyond any SACKed blocks */
  819. /*-
  820. * Two cases for which we want to advance snd_nxt:
  821. * i) snd_nxt lies between end of one hole and beginning of another
  822. * ii) snd_nxt lies between end of last hole and snd_fack
  823. */
  824. while ((p = TAILQ_NEXT(cur, scblink)) != NULL) {
  825. if (SEQ_LT(tp->snd_nxt, cur->end))
  826. return;
  827. if (SEQ_GEQ(tp->snd_nxt, p->start))
  828. cur = p;
  829. else {
  830. tp->snd_nxt = p->start;
  831. return;
  832. }
  833. }
  834. if (SEQ_LT(tp->snd_nxt, cur->end))
  835. return;
  836. tp->snd_nxt = tp->snd_fack;
  837. }