/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>
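
/* Walk the segment list and arm the software TX timestamp on the
 * segment that contains byte ts_seq, so the timestamp request set on
 * the original GSO skb is not lost during segmentation.
 */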
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}
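
/* IPv4 entry point for TCP segmentation: check that the skb really is
 * TCPv4 GSO and that the TCP header is reachable, then hand off to the
 * protocol-independent tcp_gso_segment().
 */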
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo-header; we usually expect
		 * the stack to have done this already.
		 */
		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}
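
/* Split a large TCP skb into mss-sized segments. Rather than
 * recomputing each segment's checksum from scratch, the checksums are
 * adjusted incrementally for the change in TCP length.
 */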
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;
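
	/* One's complement of the old TCP length; adding the new length
	 * to this later yields the delta to fold into each segment's
	 * checksum.
	 */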
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}
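
	/* Note whether the original skb was charged to its socket via
	 * tcp_wfree(); if so, that accounting is transferred to the
	 * segments below so TCP Small Queues keeps working.
	 */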
	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder; both
	 * cases return a GSO skb, so update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;
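
	/* Checksum delta for every full-sized segment: old length
	 * (one's complement) plus the new TCP length (header + mss).
	 */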
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));
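
	/* Fix up every segment but the last: FIN and PSH may only remain
	 * set on the final segment, full-sized segments share the
	 * precomputed checksum, and each successive segment advances the
	 * sequence number by mss. CWR is kept only on the first segment.
	 */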
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}
	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack will be called when the last frag
	 * is freed at TX completion, and not right now when gso_skb is
	 * freed by the GSO engine.
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}
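
	/* The last segment may be shorter than mss, so recompute its
	 * checksum delta from its actual TCP length (header plus linear
	 * and paged payload).
	 */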
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}
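
/* Try to merge an incoming skb with a packet already held on the GRO
 * list. Returns the list slot of a packet that must be flushed up the
 * stack, or NULL if everything can stay held.
 */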
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);
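
	/* Look for a held packet of the same flow; the source and
	 * destination ports are compared together as one 32-bit word.
	 */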
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;
found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed
	 * ID or whether we use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
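
	/* A segment shorter than one MSS, or one carrying flags that
	 * must be delivered immediately, always forces a flush.
	 */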
out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}
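
/* Finalize a coalesced packet: set up CHECKSUM_PARTIAL and the GSO
 * metadata so the stack treats it like a locally built GSO skb.
 */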
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
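
/* Seed th->check with the IPv4 pseudo-header checksum so that the
 * CHECKSUM_PARTIAL state set up by tcp_gro_complete() is valid.
 */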
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	= tcp4_gso_segment,
		.gro_receive	= tcp4_gro_receive,
		.gro_complete	= tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}