tcp_lro.c

/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2007, Myricom Inc.
 * Copyright (c) 2008, Intel Corporation.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2016 Mellanox Technologies.
 * All rights reserved.
 *
 * Portions of this software were developed by Bjoern Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockbuf.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/ethernet.h>
#include <net/vnet.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_lro.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_log_buf.h>
#include <netinet6/ip6_var.h>

#include <machine/in_cksum.h>
static MALLOC_DEFINE(M_LRO, "LRO", "LRO control structures");

#define	TCP_LRO_UPDATE_CSUM	1
#ifndef	TCP_LRO_UPDATE_CSUM
#define	TCP_LRO_INVALID_CSUM	0x0000
#endif

static void	tcp_lro_rx_done(struct lro_ctrl *lc);
static int	tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m,
		    uint32_t csum, int use_hash);

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, lro, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "TCP LRO");

static long tcplro_stacks_wanting_mbufq = 0;
counter_u64_t tcp_inp_lro_direct_queue;
counter_u64_t tcp_inp_lro_wokeup_queue;
counter_u64_t tcp_inp_lro_compressed;
counter_u64_t tcp_inp_lro_single_push;
counter_u64_t tcp_inp_lro_locks_taken;
counter_u64_t tcp_inp_lro_sack_wake;

static unsigned	tcp_lro_entries = TCP_LRO_ENTRIES;
static int32_t hold_lock_over_compress = 0;
SYSCTL_INT(_net_inet_tcp_lro, OID_AUTO, hold_lock, CTLFLAG_RW,
    &hold_lock_over_compress, 0,
    "Do we hold the lock over the compress of mbufs?");
SYSCTL_UINT(_net_inet_tcp_lro, OID_AUTO, entries,
    CTLFLAG_RDTUN | CTLFLAG_MPSAFE, &tcp_lro_entries, 0,
    "default number of LRO entries");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, fullqueue, CTLFLAG_RD,
    &tcp_inp_lro_direct_queue, "Number of lro's fully queued to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, wokeup, CTLFLAG_RD,
    &tcp_inp_lro_wokeup_queue, "Number of lro's where we woke up transport via hpts");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, compressed, CTLFLAG_RD,
    &tcp_inp_lro_compressed, "Number of lro's compressed and sent to transport");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, single, CTLFLAG_RD,
    &tcp_inp_lro_single_push, "Number of lro's sent with single segment");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, lockcnt, CTLFLAG_RD,
    &tcp_inp_lro_locks_taken, "Number of lro's inp_wlocks taken");
SYSCTL_COUNTER_U64(_net_inet_tcp_lro, OID_AUTO, sackwakeups, CTLFLAG_RD,
    &tcp_inp_lro_sack_wake, "Number of wakeups caused by sack/fin");
void
tcp_lro_reg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, 1);
}

void
tcp_lro_dereg_mbufq(void)
{
	atomic_fetchadd_long(&tcplro_stacks_wanting_mbufq, -1);
}
static __inline void
tcp_lro_active_insert(struct lro_ctrl *lc, struct lro_head *bucket,
    struct lro_entry *le)
{
	LIST_INSERT_HEAD(&lc->lro_active, le, next);
	LIST_INSERT_HEAD(bucket, le, hash_next);
}

static __inline void
tcp_lro_active_remove(struct lro_entry *le)
{
	LIST_REMOVE(le, next);		/* active list */
	LIST_REMOVE(le, hash_next);	/* hash bucket */
}
int
tcp_lro_init(struct lro_ctrl *lc)
{
	return (tcp_lro_init_args(lc, NULL, tcp_lro_entries, 0));
}
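
/*
 * Note on the allocation layout below: the sort array and the LRO
 * entries share a single M_LRO allocation.  lro_mbuf_data[0..lro_mbufs-1]
 * comes first and the struct lro_entry slots follow immediately after
 * it, which is why a single free() of lc->lro_mbuf_data in
 * tcp_lro_free() releases both.
 */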
int
tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp,
    unsigned lro_entries, unsigned lro_mbufs)
{
	struct lro_entry *le;
	size_t size;
	unsigned i, elements;

	lc->lro_bad_csum = 0;
	lc->lro_queued = 0;
	lc->lro_flushed = 0;
	lc->lro_mbuf_count = 0;
	lc->lro_mbuf_max = lro_mbufs;
	lc->lro_cnt = lro_entries;
	lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX;
	lc->lro_length_lim = TCP_LRO_LENGTH_MAX;
	lc->ifp = ifp;
	LIST_INIT(&lc->lro_free);
	LIST_INIT(&lc->lro_active);

	/* create hash table to accelerate entry lookup */
	if (lro_entries > lro_mbufs)
		elements = lro_entries;
	else
		elements = lro_mbufs;
	lc->lro_hash = phashinit_flags(elements, M_LRO, &lc->lro_hashsz,
	    HASH_NOWAIT);
	if (lc->lro_hash == NULL) {
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}

	/* compute size to allocate */
	size = (lro_mbufs * sizeof(struct lro_mbuf_sort)) +
	    (lro_entries * sizeof(*le));
	lc->lro_mbuf_data = (struct lro_mbuf_sort *)
	    malloc(size, M_LRO, M_NOWAIT | M_ZERO);

	/* check for out of memory */
	if (lc->lro_mbuf_data == NULL) {
		free(lc->lro_hash, M_LRO);
		memset(lc, 0, sizeof(*lc));
		return (ENOMEM);
	}
	/* compute offset for LRO entries */
	le = (struct lro_entry *)
	    (lc->lro_mbuf_data + lro_mbufs);

	/* setup linked list */
	for (i = 0; i != lro_entries; i++)
		LIST_INSERT_HEAD(&lc->lro_free, le + i, next);

	return (0);
}
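
/*
 * Locate the TCP header that follows the Ethernet and IP headers of an
 * mbuf belonging to this entry.  This is only safe because the rx path
 * already rejects packets with IPv4 options or IPv6 extension headers,
 * so the TCP header always sits directly behind a fixed-size network
 * header.
 */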
static struct tcphdr *
tcp_lro_get_th(struct lro_entry *le, struct mbuf *m)
{
	struct ether_header *eh;
	struct tcphdr *th = NULL;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;	/* Keep compiler happy. */
#endif
#ifdef INET
	struct ip *ip4 = NULL;		/* Keep compiler happy. */
#endif

	eh = mtod(m, struct ether_header *);
	switch (le->eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(eh + 1);
		th = (struct tcphdr *)(ip6 + 1);
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		ip4 = (struct ip *)(eh + 1);
		th = (struct tcphdr *)(ip4 + 1);
		break;
#endif
	}
	return (th);
}
void
tcp_lro_free(struct lro_ctrl *lc)
{
	struct lro_entry *le;
	unsigned x;

	/* reset LRO free list */
	LIST_INIT(&lc->lro_free);

	/* free active mbufs, if any */
	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		m_freem(le->m_head);
	}

	/* free hash table */
	free(lc->lro_hash, M_LRO);
	lc->lro_hash = NULL;
	lc->lro_hashsz = 0;

	/* free mbuf array, if any */
	for (x = 0; x != lc->lro_mbuf_count; x++)
		m_freem(lc->lro_mbuf_data[x].mb);
	lc->lro_mbuf_count = 0;

	/* free allocated memory, if any */
	free(lc->lro_mbuf_data, M_LRO);
	lc->lro_mbuf_data = NULL;
}
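
/*
 * Sum the TCP header as 16-bit words in one's complement arithmetic,
 * folding any carries back into the low 16 bits.  th_sum is zeroed
 * first (and left zero), so the result is the checksum over the header
 * bytes only; the loop consumes th_off 32-bit words, i.e. two uint16_t
 * reads per iteration.
 */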
static uint16_t
tcp_lro_csum_th(struct tcphdr *th)
{
	uint32_t ch;
	uint16_t *p, l;

	ch = th->th_sum = 0x0000;
	l = th->th_off;
	p = (uint16_t *)th;
	while (l > 0) {
		ch += *p;
		p++;
		ch += *p;
		p++;
		l--;
	}
	while (ch > 0xffff)
		ch = (ch >> 16) + (ch & 0xffff);
	return (ch & 0xffff);
}
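
/*
 * Derive the checksum over just the TCP payload from the checksum the
 * caller hands in: subtract, in one's complement arithmetic (i.e. by
 * adding the complement), first the pseudo-header contribution for this
 * network protocol and then the TCP header contribution computed above.
 * What remains is the data-only checksum that is accumulated in
 * ulp_csum as segments are appended.
 */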
static uint16_t
tcp_lro_rx_csum_fixup(struct lro_entry *le, void *l3hdr, struct tcphdr *th,
    uint16_t tcp_data_len, uint16_t csum)
{
	uint32_t c;
	uint16_t cs;

	c = csum;

	/* Remove length from checksum. */
	switch (le->eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)l3hdr;
		if (le->append_cnt == 0)
			cs = ip6->ip6_plen;
		else {
			uint32_t cx;

			cx = ntohs(ip6->ip6_plen);
			cs = in6_cksum_pseudo(ip6, cx, ip6->ip6_nxt, 0);
		}
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		struct ip *ip4;

		ip4 = (struct ip *)l3hdr;
		if (le->append_cnt == 0)
			cs = ip4->ip_len;
		else {
			cs = in_addword(ntohs(ip4->ip_len) - sizeof(*ip4),
			    IPPROTO_TCP);
			cs = in_pseudo(ip4->ip_src.s_addr, ip4->ip_dst.s_addr,
			    htons(cs));
		}
		break;
	}
#endif
	default:
		cs = 0;		/* Keep compiler happy. */
	}

	cs = ~cs;
	c += cs;

	/* Remove TCP header csum. */
	cs = ~tcp_lro_csum_th(th);
	c += cs;
	while (c > 0xffff)
		c = (c >> 16) + (c & 0xffff);

	return (c & 0xffff);
}
static void
tcp_lro_rx_done(struct lro_ctrl *lc)
{
	struct lro_entry *le;

	while ((le = LIST_FIRST(&lc->lro_active)) != NULL) {
		tcp_lro_active_remove(le);
		tcp_lro_flush(lc, le);
	}
}
void
tcp_lro_flush_inactive(struct lro_ctrl *lc, const struct timeval *timeout)
{
	struct lro_entry *le, *le_tmp;
	struct timeval tv;

	if (LIST_EMPTY(&lc->lro_active))
		return;

	getmicrouptime(&tv);
	timevalsub(&tv, timeout);
	LIST_FOREACH_SAFE(le, &lc->lro_active, next, le_tmp) {
		if (timevalcmp(&tv, &le->mtime, >=)) {
			tcp_lro_active_remove(le);
			tcp_lro_flush(lc, le);
		}
	}
}
#ifdef INET6
static int
tcp_lro_rx_ipv6(struct lro_ctrl *lc, struct mbuf *m, struct ip6_hdr *ip6,
    struct tcphdr **th)
{

	/* XXX-BZ we should check the flow-label. */

	/* XXX-BZ We do not yet support ext. hdrs. */
	if (ip6->ip6_nxt != IPPROTO_TCP)
		return (TCP_LRO_NOT_SUPPORTED);

	/* Find the TCP header. */
	*th = (struct tcphdr *)(ip6 + 1);

	return (0);
}
#endif
#ifdef INET
static int
tcp_lro_rx_ipv4(struct lro_ctrl *lc, struct mbuf *m, struct ip *ip4,
    struct tcphdr **th)
{
	int csum_flags;
	uint16_t csum;

	if (ip4->ip_p != IPPROTO_TCP)
		return (TCP_LRO_NOT_SUPPORTED);

	/* Ensure there are no options. */
	if ((ip4->ip_hl << 2) != sizeof(*ip4))
		return (TCP_LRO_CANNOT);

	/* .. and the packet is not fragmented. */
	if (ip4->ip_off & htons(IP_MF|IP_OFFMASK))
		return (TCP_LRO_CANNOT);

	/* Legacy IP has a header checksum that needs to be correct. */
	csum_flags = m->m_pkthdr.csum_flags;
	if (csum_flags & CSUM_IP_CHECKED) {
		if (__predict_false((csum_flags & CSUM_IP_VALID) == 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	} else {
		csum = in_cksum_hdr(ip4);
		if (__predict_false(csum != 0)) {
			lc->lro_bad_csum++;
			return (TCP_LRO_CANNOT);
		}
	}

	/* Find the TCP header (we assured there are no IP options). */
	*th = (struct tcphdr *)(ip4 + 1);

	return (0);
}
#endif
static void
tcp_lro_log(struct tcpcb *tp, struct lro_ctrl *lc,
    struct lro_entry *le, struct mbuf *m, int frm, int32_t tcp_data_len,
    uint32_t th_seq, uint32_t th_ack, uint16_t th_win)
{
	if (tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;
		uint32_t cts;

		cts = tcp_get_usecs(&tv);
		memset(&log, 0, sizeof(union tcp_log_stackspecific));
		log.u_bbr.flex8 = frm;
		log.u_bbr.flex1 = tcp_data_len;
		if (m)
			log.u_bbr.flex2 = m->m_pkthdr.len;
		else
			log.u_bbr.flex2 = 0;
		log.u_bbr.flex3 = le->append_cnt;
		log.u_bbr.flex4 = le->p_len;
		log.u_bbr.flex5 = le->m_head->m_pkthdr.len;
		log.u_bbr.delRate = le->m_head->m_flags;
		log.u_bbr.rttProp = le->m_head->m_pkthdr.rcv_tstmp;
		log.u_bbr.flex6 = lc->lro_length_lim;
		log.u_bbr.flex7 = lc->lro_ackcnt_lim;
		log.u_bbr.inflight = th_seq;
		log.u_bbr.timeStamp = cts;
		log.u_bbr.epoch = le->next_seq;
		log.u_bbr.delivered = th_ack;
		log.u_bbr.lt_epoch = le->ack_seq;
		log.u_bbr.pacing_gain = th_win;
		log.u_bbr.cwnd_gain = le->window;
		log.u_bbr.cur_del_rate = (uintptr_t)m;
		log.u_bbr.bw_inuse = (uintptr_t)le->m_head;
		log.u_bbr.pkts_out = le->mbuf_cnt;	/* Total mbufs added */
		log.u_bbr.applimited = le->ulp_csum;
		log.u_bbr.lost = le->mbuf_appended;
		TCP_LOG_EVENTP(tp, NULL,
		    &tp->t_inpcb->inp_socket->so_rcv,
		    &tp->t_inpcb->inp_socket->so_snd,
		    TCP_LOG_LRO, 0,
		    0, &log, false, &tv);
	}
}
static void
tcp_flush_out_le(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, int locked)
{
	if (le->append_cnt > 1) {
		struct tcphdr *th;
		uint16_t p_len;

		p_len = htons(le->p_len);
		switch (le->eh_type) {
#ifdef INET6
		case ETHERTYPE_IPV6:
		{
			struct ip6_hdr *ip6;

			ip6 = le->le_ip6;
			ip6->ip6_plen = p_len;
			th = (struct tcphdr *)(ip6 + 1);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR;
			le->p_len += ETHER_HDR_LEN + sizeof(*ip6);
			break;
		}
#endif
#ifdef INET
		case ETHERTYPE_IP:
		{
			struct ip *ip4;
			uint32_t cl;
			uint16_t c;

			ip4 = le->le_ip4;
			/* Fix IP header checksum for new length. */
			c = ~ip4->ip_sum;
			cl = c;
			c = ~ip4->ip_len;
			cl += c + p_len;
			while (cl > 0xffff)
				cl = (cl >> 16) + (cl & 0xffff);
			c = cl;
			ip4->ip_sum = ~c;
			ip4->ip_len = p_len;
			th = (struct tcphdr *)(ip4 + 1);
			le->m_head->m_pkthdr.csum_flags = CSUM_DATA_VALID |
			    CSUM_PSEUDO_HDR | CSUM_IP_CHECKED | CSUM_IP_VALID;
			le->p_len += ETHER_HDR_LEN;
			break;
		}
#endif
		default:
			th = NULL;	/* Keep compiler happy. */
		}
		le->m_head->m_pkthdr.csum_data = 0xffff;
		le->m_head->m_pkthdr.len = le->p_len;

		/* Incorporate the latest ACK into the TCP header. */
		th->th_ack = le->ack_seq;
		th->th_win = le->window;

		/* Incorporate latest timestamp into the TCP header. */
		if (le->timestamp != 0) {
			uint32_t *ts_ptr;

			ts_ptr = (uint32_t *)(th + 1);
			ts_ptr[1] = htonl(le->tsval);
			ts_ptr[2] = le->tsecr;
		}

		/* Update the TCP header checksum. */
		le->ulp_csum += p_len;
		le->ulp_csum += tcp_lro_csum_th(th);
		while (le->ulp_csum > 0xffff)
			le->ulp_csum = (le->ulp_csum >> 16) +
			    (le->ulp_csum & 0xffff);
		th->th_sum = (le->ulp_csum & 0xffff);
		th->th_sum = ~th->th_sum;
		if (tp && locked) {
			tcp_lro_log(tp, lc, le, NULL, 7, 0, 0, 0, 0);
		}
	}
	/*
	 * Break any chain.  In the singleton case m_nextpkt was never
	 * cleared here; in the other cases push_and_replace already set
	 * m_nextpkt to NULL.
	 */
	le->m_head->m_nextpkt = NULL;
	le->m_head->m_pkthdr.lro_nsegs = le->append_cnt;
	if (tp && locked) {
		tcp_lro_log(tp, lc, le, le->m_head, 8, 0, 0, 0, 0);
	}
	(*lc->ifp->if_input)(lc->ifp, le->m_head);
	lc->lro_queued += le->append_cnt;
}
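
/*
 * Re-seed an LRO entry from a fresh mbuf: every cached header pointer,
 * address, port, sequence and checksum field is recomputed from "m",
 * because the mbuf the entry previously described has just been handed
 * up the stack by tcp_flush_out_le().
 */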
static void
tcp_set_le_to_m(struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m)
{
	struct ether_header *eh;
	void *l3hdr = NULL;		/* Keep compiler happy. */
	struct tcphdr *th;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;	/* Keep compiler happy. */
#endif
#ifdef INET
	struct ip *ip4 = NULL;		/* Keep compiler happy. */
#endif
	uint32_t *ts_ptr;
	int error, l, ts_failed = 0;
	uint16_t tcp_data_len;
	uint16_t csum;

	error = -1;
	eh = mtod(m, struct ether_header *);
	/*
	 * We must reset the other pointers since the mbuf
	 * we were pointing to is about to go away.
	 */
	switch (le->eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		l3hdr = ip6 = (struct ip6_hdr *)(eh + 1);
		error = tcp_lro_rx_ipv6(lc, m, ip6, &th);
		le->le_ip6 = ip6;
		le->source_ip6 = ip6->ip6_src;
		le->dest_ip6 = ip6->ip6_dst;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN - sizeof(*ip6);
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		l3hdr = ip4 = (struct ip *)(eh + 1);
		error = tcp_lro_rx_ipv4(lc, m, ip4, &th);
		le->le_ip4 = ip4;
		le->source_ip4 = ip4->ip_src.s_addr;
		le->dest_ip4 = ip4->ip_dst.s_addr;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN;
		break;
#endif
	}
	KASSERT(error == 0, ("%s: le=%p tcp_lro_rx_xxx failed\n",
	    __func__, le));
	ts_ptr = (uint32_t *)(th + 1);
	l = (th->th_off << 2);
	l -= sizeof(*th);
	if (l != 0 &&
	    (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
	    (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
	    TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
		/* We failed to find a timestamp; some other option? */
		ts_failed = 1;
	}
	if ((l != 0) && (ts_failed == 0)) {
		le->timestamp = 1;
		le->tsval = ntohl(*(ts_ptr + 1));
		le->tsecr = *(ts_ptr + 2);
	} else
		le->timestamp = 0;
	le->source_port = th->th_sport;
	le->dest_port = th->th_dport;
	/* Pull out the csum */
	tcp_data_len = m->m_pkthdr.lro_len;
	le->next_seq = ntohl(th->th_seq) + tcp_data_len;
	le->ack_seq = th->th_ack;
	le->window = th->th_win;
	csum = th->th_sum;
	/* Setup the data pointers */
	le->m_head = m;
	le->m_tail = m_last(m);
	le->append_cnt = 0;
	le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len,
	    ~csum);
	le->append_cnt++;
	th->th_sum = csum;	/* Restore checksum on first packet. */
}
static void
tcp_push_and_replace(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, struct mbuf *m, int locked)
{
	/*
	 * Push up the stack the current le and replace
	 * it with m.
	 */
	struct mbuf *msave;

	/* Grab off the next and save it */
	msave = le->m_head->m_nextpkt;
	le->m_head->m_nextpkt = NULL;
	/* Now push out the old le entry */
	tcp_flush_out_le(tp, lc, le, locked);
	/*
	 * Now to replace the data properly in the le
	 * we have to reset the tcp header and
	 * other fields.
	 */
	tcp_set_le_to_m(lc, le, m);
	/* Restore the next list */
	m->m_nextpkt = msave;
}
static void
tcp_lro_condense(struct tcpcb *tp, struct lro_ctrl *lc, struct lro_entry *le, int locked)
{
	/*
	 * Walk through the mbuf chain we
	 * have on tap and compress/condense
	 * as required.
	 */
	uint32_t *ts_ptr;
	struct mbuf *m;
	struct tcphdr *th;
	uint16_t tcp_data_len, csum_upd;
	int l;

	/*
	 * First we must check the lead (m_head)
	 * we must make sure that it is *not*
	 * something that should be sent up
	 * right away (sack etc).
	 */
again:
	m = le->m_head->m_nextpkt;
	if (m == NULL) {
		/* Just the one left */
		return;
	}
	th = tcp_lro_get_th(le, le->m_head);
	KASSERT(th != NULL,
	    ("le:%p m:%p th comes back NULL?", le, le->m_head));
	l = (th->th_off << 2);
	l -= sizeof(*th);
	ts_ptr = (uint32_t *)(th + 1);
	if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
	    (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
	    TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
		/*
		 * It's not the timestamp.  We can't
		 * use this guy as the head.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(tp, lc, le, m, locked);
		goto again;
	}
	if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
		/*
		 * Make sure that previously seen segments/ACKs are delivered
		 * before this segment, e.g. FIN.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		tcp_push_and_replace(tp, lc, le, m, locked);
		goto again;
	}
	while ((m = le->m_head->m_nextpkt) != NULL) {
		/*
		 * condense m into le, first
		 * pull m out of the list.
		 */
		le->m_head->m_nextpkt = m->m_nextpkt;
		m->m_nextpkt = NULL;
		/* Setup my data */
		tcp_data_len = m->m_pkthdr.lro_len;
		th = tcp_lro_get_th(le, m);
		KASSERT(th != NULL,
		    ("le:%p m:%p th comes back NULL?", le, m));
		ts_ptr = (uint32_t *)(th + 1);
		l = (th->th_off << 2);
		l -= sizeof(*th);
		if (tp && locked) {
			tcp_lro_log(tp, lc, le, m, 1, 0, 0, 0, 0);
		}
		if (le->append_cnt >= lc->lro_ackcnt_lim) {
			if (tp && locked) {
				tcp_lro_log(tp, lc, le, m, 2, 0, 0, 0, 0);
			}
			tcp_push_and_replace(tp, lc, le, m, locked);
			goto again;
		}
		if (le->p_len > (lc->lro_length_lim - tcp_data_len)) {
			/* Flush now if appending will result in overflow. */
			if (tp && locked) {
				tcp_lro_log(tp, lc, le, m, 3, tcp_data_len, 0, 0, 0);
			}
			tcp_push_and_replace(tp, lc, le, m, locked);
			goto again;
		}
		if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
		    (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
		    TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
			/*
			 * Maybe a sack in the new one? We need to
			 * start all over after flushing the
			 * current le. We will go up to the beginning
			 * and flush it (calling the replace again possibly
			 * or just returning).
			 */
			tcp_push_and_replace(tp, lc, le, m, locked);
			goto again;
		}
		if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0) {
			tcp_push_and_replace(tp, lc, le, m, locked);
			goto again;
		}
		if (l != 0) {
			uint32_t tsval = ntohl(*(ts_ptr + 1));
			/* Make sure timestamp values are increasing. */
			if (TSTMP_GT(le->tsval, tsval)) {
				tcp_push_and_replace(tp, lc, le, m, locked);
				goto again;
			}
			le->tsval = tsval;
			le->tsecr = *(ts_ptr + 2);
		}
		/* Try to append the new segment. */
		if (__predict_false(ntohl(th->th_seq) != le->next_seq ||
		    (tcp_data_len == 0 &&
		    le->ack_seq == th->th_ack &&
		    le->window == th->th_win))) {
			/* Out of order packet or duplicate ACK. */
			if (tp && locked) {
				tcp_lro_log(tp, lc, le, m, 4, tcp_data_len,
				    ntohl(th->th_seq),
				    th->th_ack,
				    th->th_win);
			}
			tcp_push_and_replace(tp, lc, le, m, locked);
			goto again;
		}
		if (tcp_data_len || SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq))) {
			le->next_seq += tcp_data_len;
			le->ack_seq = th->th_ack;
			le->window = th->th_win;
		} else if (th->th_ack == le->ack_seq) {
			le->window = WIN_MAX(le->window, th->th_win);
		}
		csum_upd = m->m_pkthdr.lro_csum;
		le->ulp_csum += csum_upd;
		if (tcp_data_len == 0) {
			le->append_cnt++;
			le->mbuf_cnt--;
			if (tp && locked) {
				tcp_lro_log(tp, lc, le, m, 5, tcp_data_len,
				    ntohl(th->th_seq),
				    th->th_ack,
				    th->th_win);
			}
			m_freem(m);
			continue;
		}
		le->append_cnt++;
		le->mbuf_appended++;
		le->p_len += tcp_data_len;
		/*
		 * Adjust the mbuf so that m_data points to the first byte of
		 * the ULP payload.  Adjust the mbuf to avoid complications and
		 * append new segment to existing mbuf chain.
		 */
		m_adj(m, m->m_pkthdr.len - tcp_data_len);
		if (tp && locked) {
			tcp_lro_log(tp, lc, le, m, 6, tcp_data_len,
			    ntohl(th->th_seq),
			    th->th_ack,
			    th->th_win);
		}
		m_demote_pkthdr(m);
		le->m_tail->m_next = m;
		le->m_tail = m_last(m);
	}
}
#ifdef TCPHPTS
static void
tcp_queue_pkts(struct tcpcb *tp, struct lro_entry *le)
{
	if (tp->t_in_pkt == NULL) {
		/* Nothing yet there */
		tp->t_in_pkt = le->m_head;
		tp->t_tail_pkt = le->m_last_mbuf;
	} else {
		/* Already some there */
		tp->t_tail_pkt->m_nextpkt = le->m_head;
		tp->t_tail_pkt = le->m_last_mbuf;
	}
	le->m_head = NULL;
	le->m_last_mbuf = NULL;
}
#endif
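
/*
 * Flush one entry.  When TCPHPTS is compiled in and the owning
 * transport advertises INP_SUPPORTS_MBUFQ, the whole mbuf chain is
 * appended unmodified to the connection's input queue (optionally
 * waking the transport via HPTS); otherwise we fall back to the
 * classic LRO path, condensing the chain and handing a single merged
 * packet to if_input.
 */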
void
tcp_lro_flush(struct lro_ctrl *lc, struct lro_entry *le)
{
	struct tcpcb *tp = NULL;
	int locked = 0;
#ifdef TCPHPTS
	struct inpcb *inp = NULL;
	int need_wakeup = 0, can_queue = 0;
	struct epoch_tracker et;

	/* Now lets lookup the inp first */
	CURVNET_SET(lc->ifp->if_vnet);
	/*
	 * XXXRRS Currently the common input handler for
	 * mbuf queuing cannot handle VLAN Tagged. This needs
	 * to be fixed and the or condition removed (i.e. the
	 * common code should do the right lookup for the vlan
	 * tag and anything else that the vlan_input() does).
	 */
	if ((tcplro_stacks_wanting_mbufq == 0) || (le->m_head->m_flags & M_VLANTAG))
		goto skip_lookup;
	NET_EPOCH_ENTER(et);
	switch (le->eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		inp = in6_pcblookup(&V_tcbinfo, &le->source_ip6,
		    le->source_port, &le->dest_ip6, le->dest_port,
		    INPLOOKUP_WLOCKPCB,
		    lc->ifp);
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		inp = in_pcblookup(&V_tcbinfo, le->le_ip4->ip_src,
		    le->source_port, le->le_ip4->ip_dst, le->dest_port,
		    INPLOOKUP_WLOCKPCB,
		    lc->ifp);
		break;
#endif
	}
	NET_EPOCH_EXIT(et);
	if (inp && ((inp->inp_flags & (INP_DROPPED|INP_TIMEWAIT)) ||
	    (inp->inp_flags2 & INP_FREED))) {
		/* We don't want this guy */
		INP_WUNLOCK(inp);
		inp = NULL;
	}
	if (inp && (inp->inp_flags2 & INP_SUPPORTS_MBUFQ)) {
		/* The transport supports mbuf queuing */
		can_queue = 1;
		if (le->need_wakeup ||
		    ((inp->inp_in_input == 0) &&
		    ((inp->inp_flags2 & INP_MBUF_QUEUE_READY) == 0))) {
			/*
			 * Either the transport is off on a keep-alive
			 * (it has the queue_ready flag clear and it has
			 * not already been woken) or the entry has
			 * some urgent thing (FIN or possibly SACK blocks).
			 * This means we need to wake the transport up by
			 * putting it on the input pacer.
			 */
			need_wakeup = 1;
			if ((inp->inp_flags2 & INP_DONT_SACK_QUEUE) &&
			    (le->need_wakeup != 1)) {
				/*
				 * Prohibited from a sack wakeup.
				 */
				need_wakeup = 0;
			}
		}
		/* Do we need to be awoken due to lots of data or acks? */
		if ((le->tcp_tot_p_len >= lc->lro_length_lim) ||
		    (le->mbuf_cnt >= lc->lro_ackcnt_lim))
			need_wakeup = 1;
	}
	if (inp) {
		tp = intotcpcb(inp);
		locked = 1;
	} else
		tp = NULL;
	if (can_queue) {
		counter_u64_add(tcp_inp_lro_direct_queue, 1);
		tcp_lro_log(tp, lc, le, NULL, 22, need_wakeup,
		    inp->inp_flags2, inp->inp_in_input, le->need_wakeup);
		tcp_queue_pkts(tp, le);
		if (need_wakeup) {
			/*
			 * We must get the guy to wakeup via
			 * hpts.
			 */
			counter_u64_add(tcp_inp_lro_wokeup_queue, 1);
			if (le->need_wakeup)
				counter_u64_add(tcp_inp_lro_sack_wake, 1);
			tcp_queue_to_input(inp);
		}
	}
	if (inp && (hold_lock_over_compress == 0)) {
		/* Unlock it */
		locked = 0;
		tp = NULL;
		counter_u64_add(tcp_inp_lro_locks_taken, 1);
		INP_WUNLOCK(inp);
	}
	if (can_queue == 0) {
skip_lookup:
#endif /* TCPHPTS */
		/* Old fashioned lro method */
		if (le->m_head != le->m_last_mbuf) {
			counter_u64_add(tcp_inp_lro_compressed, 1);
			tcp_lro_condense(tp, lc, le, locked);
		} else
			counter_u64_add(tcp_inp_lro_single_push, 1);
		tcp_flush_out_le(tp, lc, le, locked);
#ifdef TCPHPTS
	}
	if (inp && locked) {
		counter_u64_add(tcp_inp_lro_locks_taken, 1);
		INP_WUNLOCK(inp);
	}
	CURVNET_RESTORE();
#endif
	lc->lro_flushed++;
	bzero(le, sizeof(*le));
	LIST_INSERT_HEAD(&lc->lro_free, le, next);
}
#ifdef HAVE_INLINE_FLSLL
#define	tcp_lro_msb_64(x) (1ULL << (flsll(x) - 1))
#else
static inline uint64_t
tcp_lro_msb_64(uint64_t x)
{
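	/*
	 * Smear the highest set bit into every lower position, then keep
	 * only the bit whose right-hand neighbour is clear after the
	 * smear: that is exactly the most significant set bit of x.
	 */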
	x |= (x >> 1);
	x |= (x >> 2);
	x |= (x >> 4);
	x |= (x >> 8);
	x |= (x >> 16);
	x |= (x >> 32);
	return (x & ~(x >> 1));
}
#endif
/*
 * The tcp_lro_sort() routine is comparable to qsort(), except it has
 * a worst case complexity limit of O(MIN(N,64)*N), where N is the
 * number of elements to sort and 64 is the number of sequence bits
 * available.  The algorithm is bit-slicing the 64-bit sequence number,
 * sorting one bit at a time from the most significant bit until the
 * least significant one, skipping the constant bits.  This is
 * typically called a radix sort.
 */
static void
tcp_lro_sort(struct lro_mbuf_sort *parray, uint32_t size)
{
	struct lro_mbuf_sort temp;
	uint64_t ones;
	uint64_t zeros;
	uint32_t x;
	uint32_t y;

repeat:
	/* for small arrays insertion sort is faster */
	if (size <= 12) {
		for (x = 1; x < size; x++) {
			temp = parray[x];
			for (y = x; y > 0 && temp.seq < parray[y - 1].seq; y--)
				parray[y] = parray[y - 1];
			parray[y] = temp;
		}
		return;
	}

	/* compute sequence bits which are constant */
	ones = 0;
	zeros = 0;
	for (x = 0; x != size; x++) {
		ones |= parray[x].seq;
		zeros |= ~parray[x].seq;
	}

	/* compute bits which are not constant into "ones" */
	ones &= zeros;
	if (ones == 0)
		return;

	/* pick the most significant bit which is not constant */
	ones = tcp_lro_msb_64(ones);

	/*
	 * Move entries having cleared sequence bits to the beginning
	 * of the array:
	 */
	for (x = y = 0; y != size; y++) {
		/* skip set bits */
		if (parray[y].seq & ones)
			continue;
		/* swap entries */
		temp = parray[x];
		parray[x] = parray[y];
		parray[y] = temp;
		x++;
	}

	KASSERT(x != 0 && x != size, ("Memory is corrupted\n"));

	/* sort zeros */
	tcp_lro_sort(parray, x);

	/* sort ones */
	parray += x;
	size -= x;
	goto repeat;
}
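
/*
 * After sorting, mbufs belonging to the same flow sit next to each
 * other in the array.  The low 24 bits of each 64-bit key hold the
 * queue index (kept only to make the sort stable), so they are masked
 * off below before comparing keys: any change in the remaining bits
 * (hash type and flowid, as encoded in tcp_lro_queue_mbuf()) marks a
 * stream boundary, at which point all active entries are flushed.
 */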
void
tcp_lro_flush_all(struct lro_ctrl *lc)
{
	uint64_t seq;
	uint64_t nseq;
	unsigned x;

	/* check if no mbufs to flush */
	if (lc->lro_mbuf_count == 0)
		goto done;

	/* sort all mbufs according to stream */
	tcp_lro_sort(lc->lro_mbuf_data, lc->lro_mbuf_count);

	/* input data into LRO engine, stream by stream */
	seq = 0;
	for (x = 0; x != lc->lro_mbuf_count; x++) {
		struct mbuf *mb;

		/* get mbuf */
		mb = lc->lro_mbuf_data[x].mb;

		/* get sequence number, masking away the packet index */
		nseq = lc->lro_mbuf_data[x].seq & (-1ULL << 24);

		/* check for new stream */
		if (seq != nseq) {
			seq = nseq;

			/* flush active streams */
			tcp_lro_rx_done(lc);
		}

		/* add packet to LRO engine */
		if (tcp_lro_rx2(lc, mb, 0, 0) != 0) {
			/* input packet to network layer */
			(*lc->ifp->if_input)(lc->ifp, mb);
			lc->lro_queued++;
			lc->lro_flushed++;
		}
	}
done:
	/* flush active streams */
	tcp_lro_rx_done(lc);

	lc->lro_mbuf_count = 0;
}
static void
lro_set_mtime(struct timeval *tv, struct timespec *ts)
{
	tv->tv_sec = ts->tv_sec;
	tv->tv_usec = ts->tv_nsec / 1000;
}
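
/*
 * Core receive path.  tcp_lro_rx2() validates the Ethernet/IP/TCP
 * headers, rejects anything that cannot be coalesced (SYN segments,
 * fragments, IP options, forwarding enabled), then looks the flow up
 * in the hash: on a match the mbuf is chained onto the existing entry
 * via m_nextpkt, otherwise a free entry is seeded from this packet.
 * The actual merging is deferred to tcp_lro_condense() at flush time.
 */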
static int
tcp_lro_rx2(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum, int use_hash)
{
	struct lro_entry *le;
	struct ether_header *eh;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;	/* Keep compiler happy. */
#endif
#ifdef INET
	struct ip *ip4 = NULL;		/* Keep compiler happy. */
#endif
	struct tcphdr *th;
	void *l3hdr = NULL;		/* Keep compiler happy. */
	uint32_t *ts_ptr;
	tcp_seq seq;
	int error, ip_len, l;
	uint16_t eh_type, tcp_data_len, need_flush;
	struct lro_head *bucket;
	struct timespec arrv;

	/* We expect a contiguous header [eh, ip, tcp]. */
	if ((m->m_flags & (M_TSTMP_LRO|M_TSTMP)) == 0) {
		/* If no hardware or arrival stamp on the packet add arrival */
		nanouptime(&arrv);
		m->m_pkthdr.rcv_tstmp = (arrv.tv_sec * 1000000000) + arrv.tv_nsec;
		m->m_flags |= M_TSTMP_LRO;
	}
	eh = mtod(m, struct ether_header *);
	eh_type = ntohs(eh->ether_type);
	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
	{
		CURVNET_SET(lc->ifp->if_vnet);
		if (V_ip6_forwarding != 0) {
			/* XXX-BZ stats but changing lro_ctrl is a problem. */
			CURVNET_RESTORE();
			return (TCP_LRO_CANNOT);
		}
		CURVNET_RESTORE();
		l3hdr = ip6 = (struct ip6_hdr *)(eh + 1);
		error = tcp_lro_rx_ipv6(lc, m, ip6, &th);
		if (error != 0)
			return (error);
		tcp_data_len = ntohs(ip6->ip6_plen);
		ip_len = sizeof(*ip6) + tcp_data_len;
		break;
	}
#endif
#ifdef INET
	case ETHERTYPE_IP:
	{
		CURVNET_SET(lc->ifp->if_vnet);
		if (V_ipforwarding != 0) {
			/* XXX-BZ stats but changing lro_ctrl is a problem. */
			CURVNET_RESTORE();
			return (TCP_LRO_CANNOT);
		}
		CURVNET_RESTORE();
		l3hdr = ip4 = (struct ip *)(eh + 1);
		error = tcp_lro_rx_ipv4(lc, m, ip4, &th);
		if (error != 0)
			return (error);
		ip_len = ntohs(ip4->ip_len);
		tcp_data_len = ip_len - sizeof(*ip4);
		break;
	}
#endif
	/* XXX-BZ what happens in case of VLAN(s)? */
	default:
		return (TCP_LRO_NOT_SUPPORTED);
	}

	/*
	 * If the frame is padded beyond the end of the IP packet, then we must
	 * trim the extra bytes off.
	 */
	l = m->m_pkthdr.len - (ETHER_HDR_LEN + ip_len);
	if (l != 0) {
		if (l < 0)
			/* Truncated packet. */
			return (TCP_LRO_CANNOT);

		m_adj(m, -l);
	}
	/*
	 * Check TCP header constraints.
	 */
	if (th->th_flags & TH_SYN)
		return (TCP_LRO_CANNOT);
	if ((th->th_flags & ~(TH_ACK | TH_PUSH)) != 0)
		need_flush = 1;
	else
		need_flush = 0;
	l = (th->th_off << 2);
	ts_ptr = (uint32_t *)(th + 1);
	tcp_data_len -= l;
	l -= sizeof(*th);
	if (l != 0 && (__predict_false(l != TCPOLEN_TSTAMP_APPA) ||
	    (*ts_ptr != ntohl(TCPOPT_NOP<<24|TCPOPT_NOP<<16|
	    TCPOPT_TIMESTAMP<<8|TCPOLEN_TIMESTAMP)))) {
		/*
		 * We have an option besides Timestamps, maybe
		 * it is a sack (most likely) which means we
		 * will probably need to wake up a sleeper (if
		 * the guy does queueing).
		 */
		need_flush = 2;
	}

	/* If the driver did not pass in the checksum, set it now. */
	if (csum == 0x0000)
		csum = th->th_sum;
	seq = ntohl(th->th_seq);
	if (!use_hash) {
		bucket = &lc->lro_hash[0];
	} else if (M_HASHTYPE_ISHASH(m)) {
		bucket = &lc->lro_hash[m->m_pkthdr.flowid % lc->lro_hashsz];
	} else {
		uint32_t hash;

		switch (eh_type) {
#ifdef INET
		case ETHERTYPE_IP:
			hash = ip4->ip_src.s_addr + ip4->ip_dst.s_addr;
			break;
#endif
#ifdef INET6
		case ETHERTYPE_IPV6:
			hash = ip6->ip6_src.s6_addr32[0] +
			    ip6->ip6_dst.s6_addr32[0];
			hash += ip6->ip6_src.s6_addr32[1] +
			    ip6->ip6_dst.s6_addr32[1];
			hash += ip6->ip6_src.s6_addr32[2] +
			    ip6->ip6_dst.s6_addr32[2];
			hash += ip6->ip6_src.s6_addr32[3] +
			    ip6->ip6_dst.s6_addr32[3];
			break;
#endif
		default:
			hash = 0;
			break;
		}
		hash += th->th_sport + th->th_dport;
		bucket = &lc->lro_hash[hash % lc->lro_hashsz];
	}

	/* Try to find a matching previous segment. */
	LIST_FOREACH(le, bucket, hash_next) {
		if (le->eh_type != eh_type)
			continue;
		if (le->source_port != th->th_sport ||
		    le->dest_port != th->th_dport)
			continue;
		switch (eh_type) {
#ifdef INET6
		case ETHERTYPE_IPV6:
			if (bcmp(&le->source_ip6, &ip6->ip6_src,
			    sizeof(struct in6_addr)) != 0 ||
			    bcmp(&le->dest_ip6, &ip6->ip6_dst,
			    sizeof(struct in6_addr)) != 0)
				continue;
			break;
#endif
#ifdef INET
		case ETHERTYPE_IP:
			if (le->source_ip4 != ip4->ip_src.s_addr ||
			    le->dest_ip4 != ip4->ip_dst.s_addr)
				continue;
			break;
#endif
		}
		if (tcp_data_len || SEQ_GT(ntohl(th->th_ack), ntohl(le->ack_seq)) ||
		    (th->th_ack == le->ack_seq)) {
			m->m_pkthdr.lro_len = tcp_data_len;
		} else {
			/* no data and old ack */
			m_freem(m);
			return (0);
		}
		if (need_flush)
			le->need_wakeup = need_flush;
		/* Save off the data-only csum */
		m->m_pkthdr.rcvif = lc->ifp;
		m->m_pkthdr.lro_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th,
		    tcp_data_len, ~csum);
		th->th_sum = csum;	/* Restore checksum */
		/* Save off the tail I am appending to (prev) */
		le->m_prev_last = le->m_last_mbuf;
		/* Mark me in the last spot */
		le->m_last_mbuf->m_nextpkt = m;
		/* Now set the tail to me */
		le->m_last_mbuf = m;
		le->mbuf_cnt++;
		m->m_nextpkt = NULL;
		/* Add to the total size of data */
		le->tcp_tot_p_len += tcp_data_len;
		lro_set_mtime(&le->mtime, &arrv);
		return (0);
	}

	/* Try to find an empty slot. */
	if (LIST_EMPTY(&lc->lro_free))
		return (TCP_LRO_NO_ENTRIES);

	/* Start a new segment chain. */
	le = LIST_FIRST(&lc->lro_free);
	LIST_REMOVE(le, next);
	tcp_lro_active_insert(lc, bucket, le);
	lro_set_mtime(&le->mtime, &arrv);

	/* Start filling in details. */
	switch (eh_type) {
#ifdef INET6
	case ETHERTYPE_IPV6:
		le->le_ip6 = ip6;
		le->source_ip6 = ip6->ip6_src;
		le->dest_ip6 = ip6->ip6_dst;
		le->eh_type = eh_type;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN - sizeof(*ip6);
		break;
#endif
#ifdef INET
	case ETHERTYPE_IP:
		le->le_ip4 = ip4;
		le->source_ip4 = ip4->ip_src.s_addr;
		le->dest_ip4 = ip4->ip_dst.s_addr;
		le->eh_type = eh_type;
		le->p_len = m->m_pkthdr.len - ETHER_HDR_LEN;
		break;
#endif
	}
	le->source_port = th->th_sport;
	le->dest_port = th->th_dport;
	le->next_seq = seq + tcp_data_len;
	le->ack_seq = th->th_ack;
	le->window = th->th_win;
	if (l != 0) {
		le->timestamp = 1;
		le->tsval = ntohl(*(ts_ptr + 1));
		le->tsecr = *(ts_ptr + 2);
	}
	KASSERT(le->ulp_csum == 0, ("%s: le=%p le->ulp_csum=0x%04x\n",
	    __func__, le, le->ulp_csum));
	le->append_cnt = 0;
	le->ulp_csum = tcp_lro_rx_csum_fixup(le, l3hdr, th, tcp_data_len,
	    ~csum);
	le->append_cnt++;
	th->th_sum = csum;	/* Restore checksum */
	le->m_head = m;
	m->m_pkthdr.rcvif = lc->ifp;
	le->mbuf_cnt = 1;
	if (need_flush)
		le->need_wakeup = need_flush;
	else
		le->need_wakeup = 0;
	le->m_tail = m_last(m);
	le->m_last_mbuf = m;
	m->m_nextpkt = NULL;
	le->m_prev_last = NULL;
	/*
	 * We keep the total size here for cross checking when we may need
	 * to flush/wakeup in the MBUF_QUEUE case.
	 */
	le->tcp_tot_p_len = tcp_data_len;
	m->m_pkthdr.lro_len = tcp_data_len;
	return (0);
}
int
tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum)
{
	return (tcp_lro_rx2(lc, m, csum, 1));
}
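
/*
 * Deferred-input front end.  Instead of running the LRO engine per
 * packet, the mbuf is stamped and parked in lro_mbuf_data[] under a
 * 64-bit sort key laid out as:
 *
 *	bits 63-56: M_HASHTYPE_GET(mb)	(hash type)
 *	bits 55-24: mb->m_pkthdr.flowid	(flow id)
 *	bits 23-0:  queue index		(keeps the sort stable)
 *
 * so that tcp_lro_flush_all() can sort the batch and feed it to
 * tcp_lro_rx2() one flow at a time.
 */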
void
tcp_lro_queue_mbuf(struct lro_ctrl *lc, struct mbuf *mb)
{
	struct timespec arrv;

	/* sanity checks */
	if (__predict_false(lc->ifp == NULL || lc->lro_mbuf_data == NULL ||
	    lc->lro_mbuf_max == 0)) {
		/* packet drop */
		m_freem(mb);
		return;
	}

	/* check if packet is not LRO capable */
	if (__predict_false(mb->m_pkthdr.csum_flags == 0 ||
	    (lc->ifp->if_capenable & IFCAP_LRO) == 0)) {
		/* input packet to network layer */
		(*lc->ifp->if_input) (lc->ifp, mb);
		return;
	}
	/* Arrival Stamp the packet */
	if ((mb->m_flags & M_TSTMP) == 0) {
		/* If no hardware or arrival stamp on the packet add arrival */
		nanouptime(&arrv);
		mb->m_pkthdr.rcv_tstmp = ((arrv.tv_sec * 1000000000) +
		    arrv.tv_nsec);
		mb->m_flags |= M_TSTMP_LRO;
	}

	/* create sequence number */
	lc->lro_mbuf_data[lc->lro_mbuf_count].seq =
	    (((uint64_t)M_HASHTYPE_GET(mb)) << 56) |
	    (((uint64_t)mb->m_pkthdr.flowid) << 24) |
	    ((uint64_t)lc->lro_mbuf_count);

	/* enter mbuf */
	lc->lro_mbuf_data[lc->lro_mbuf_count].mb = mb;

	/* flush if array is full */
	if (__predict_false(++lc->lro_mbuf_count == lc->lro_mbuf_max))
		tcp_lro_flush_all(lc);
}
/* end */