Revert "net: Rename mono_delivery_time to tstamp_type for scalabilty"

This reverts commit 8dde02229b which is
commit 4d25ca2d6801cfcf26f7f39c561611ba5be99bf8 upstream.

It breaks the Android kernel abi and can be brought back in the future
in an abi-safe way if it is really needed.

Bug: 161946584
Change-Id: Ibbbf947cdc47fad644f5a8fc306659c11930c426
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Greg Kroah-Hartman committed 2025-05-20 09:49:18 +00:00
parent cf8861e19d, commit 3d14104593
17 changed files with 51 additions and 78 deletions
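At bottom this revert swaps one bit's interpretation across the tree. A condensed sketch of the two shapes, lifted from the skbuff.h hunks below (surrounding struct members elided):

/* This tree, after the revert: a single flag bit -- skb->tstamp either
 * carries a mono clock delivery time (EDT) or a (rcv) timestamp.
 */
__u8 mono_delivery_time:1;	/* See SKB_MONO_DELIVERY_TIME_MASK */

/* Upstream 4d25ca2d6801, being reverted: the same bit is read as a
 * clock base selector, leaving room for more clock types later.
 */
enum skb_tstamp_type {
	SKB_CLOCK_REALTIME,
	SKB_CLOCK_MONOTONIC,
};
__u8 tstamp_type:1;		/* See skb_tstamp_type */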

View File

@@ -691,11 +691,6 @@ typedef unsigned int sk_buff_data_t;
 typedef unsigned char *sk_buff_data_t;
 #endif
 
-enum skb_tstamp_type {
-	SKB_CLOCK_REALTIME,
-	SKB_CLOCK_MONOTONIC,
-};
-
 /**
  * DOC: Basic sk_buff geometry
  *
@@ -815,8 +810,10 @@ enum skb_tstamp_type {
  * @dst_pending_confirm: need to confirm neighbour
  * @decrypted: Decrypted SKB
  * @slow_gro: state present at GRO time, slower prepare step required
- * @tstamp_type: When set, skb->tstamp has the
- *		delivery_time clock base of skb->tstamp.
+ * @mono_delivery_time: When set, skb->tstamp has the
+ *		delivery_time in mono clock base (i.e. EDT). Otherwise, the
+ *		skb->tstamp has the (rcv) timestamp at ingress and
+ *		delivery_time at egress.
  * @napi_id: id of the NAPI struct this skb came from
  * @sender_cpu: (aka @napi_id) source CPU in XPS
  * @alloc_cpu: CPU which did the skb allocation.
@@ -944,7 +941,7 @@ struct sk_buff {
 			/* private: */
 			__u8		__mono_tc_offset[0];
 			/* public: */
-			__u8		tstamp_type:1;	/* See skb_tstamp_type */
+			__u8		mono_delivery_time:1;	/* See SKB_MONO_DELIVERY_TIME_MASK */
 #ifdef CONFIG_NET_XGRESS
 			__u8		tc_at_ingress:1;	/* See TC_AT_INGRESS_MASK */
 			__u8		tc_skip_classify:1;
@@ -4201,7 +4198,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb,
 static inline void __net_timestamp(struct sk_buff *skb)
 {
 	skb->tstamp = ktime_get_real();
-	skb->tstamp_type = SKB_CLOCK_REALTIME;
+	skb->mono_delivery_time = 0;
 }
 
 static inline ktime_t net_timedelta(ktime_t t)
@@ -4210,33 +4207,10 @@ static inline ktime_t net_timedelta(ktime_t t)
 }
 
 static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt,
-					 u8 tstamp_type)
+					 bool mono)
 {
 	skb->tstamp = kt;
-
-	if (kt)
-		skb->tstamp_type = tstamp_type;
-	else
-		skb->tstamp_type = SKB_CLOCK_REALTIME;
-}
-
-static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb,
-						    ktime_t kt, clockid_t clockid)
-{
-	u8 tstamp_type = SKB_CLOCK_REALTIME;
-
-	switch (clockid) {
-	case CLOCK_REALTIME:
-		break;
-	case CLOCK_MONOTONIC:
-		tstamp_type = SKB_CLOCK_MONOTONIC;
-		break;
-	default:
-		WARN_ON_ONCE(1);
-		kt = 0;
-	}
-
-	skb_set_delivery_time(skb, kt, tstamp_type);
+	skb->mono_delivery_time = kt && mono;
 }
 
 DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
@@ -4246,8 +4220,8 @@ DECLARE_STATIC_KEY_FALSE(netstamp_needed_key);
  */
 static inline void skb_clear_delivery_time(struct sk_buff *skb)
 {
-	if (skb->tstamp_type) {
-		skb->tstamp_type = SKB_CLOCK_REALTIME;
+	if (skb->mono_delivery_time) {
+		skb->mono_delivery_time = 0;
 		if (static_branch_unlikely(&netstamp_needed_key))
 			skb->tstamp = ktime_get_real();
 		else
@@ -4257,7 +4231,7 @@ static inline void skb_clear_delivery_time(struct sk_buff *skb)
 static inline void skb_clear_tstamp(struct sk_buff *skb)
 {
-	if (skb->tstamp_type)
+	if (skb->mono_delivery_time)
 		return;
 
 	skb->tstamp = 0;
@@ -4265,7 +4239,7 @@ static inline void skb_clear_tstamp(struct sk_buff *skb)
 static inline ktime_t skb_tstamp(const struct sk_buff *skb)
 {
-	if (skb->tstamp_type)
+	if (skb->mono_delivery_time)
 		return 0;
 
 	return skb->tstamp;
@@ -4273,7 +4247,7 @@ static inline ktime_t skb_tstamp(const struct sk_buff *skb)
 static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond)
 {
-	if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp)
+	if (!skb->mono_delivery_time && skb->tstamp)
 		return skb->tstamp;
 
 	if (static_branch_unlikely(&netstamp_needed_key) || cond)
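In caller terms, the hunks above trade skb_set_delivery_type_by_clockid() for a bool parameter: code that starts from a clockid_t must now collapse it to "monotonic or not" itself. A minimal sketch of that difference; the xmit_set_time_*() wrappers are hypothetical, for illustration only:

/* Upstream API being reverted: the clock base travels through, and
 * skb_set_delivery_type_by_clockid() maps clockid_t -> SKB_CLOCK_*.
 */
static void xmit_set_time_enum(struct sk_buff *skb, ktime_t kt, clockid_t clockid)
{
	skb_set_delivery_type_by_clockid(skb, kt, clockid);
}

/* API restored by this revert: the caller reduces the clock to a bool. */
static void xmit_set_time_bool(struct sk_buff *skb, ktime_t kt, clockid_t clockid)
{
	skb_set_delivery_time(skb, kt, clockid == CLOCK_MONOTONIC);
}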

View File

@@ -76,7 +76,7 @@ struct frag_v6_compare_key {
  * @stamp: timestamp of the last received fragment
  * @len: total length of the original datagram
  * @meat: length of received fragments so far
- * @tstamp_type: stamp has a mono delivery time (EDT)
+ * @mono_delivery_time: stamp has a mono delivery time (EDT)
  * @flags: fragment queue flags
  * @max_size: maximum received fragment size
  * @fqdir: pointer to struct fqdir
@@ -97,7 +97,7 @@ struct inet_frag_queue {
 	ktime_t		stamp;
 	int		len;
 	int		meat;
-	u8		tstamp_type;
+	u8		mono_delivery_time;
 	__u8		flags;
 	u16		max_size;
 	struct fqdir	*fqdir;

View File

@@ -32,7 +32,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
 			      struct sk_buff *))
 {
 	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
-	u8 tstamp_type = skb->tstamp_type;
+	bool mono_delivery_time = skb->mono_delivery_time;
 	unsigned int hlen, ll_rs, mtu;
 	ktime_t tstamp = skb->tstamp;
 	struct ip_frag_state state;
@@ -82,7 +82,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
 			if (iter.frag)
 				ip_fraglist_prepare(skb, &iter);
 
-			skb_set_delivery_time(skb, tstamp, tstamp_type);
+			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
 			err = output(net, sk, data, skb);
 			if (err || !iter.frag)
 				break;
@@ -113,7 +113,7 @@ slow_path:
 			goto blackhole;
 		}
 
-		skb_set_delivery_time(skb2, tstamp, tstamp_type);
+		skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
 		err = output(net, sk, data, skb2);
 		if (err)
 			goto blackhole;
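The shape of this change repeats in every fragmentation path below (ip_do_fragment, ip6_fragment, br_ip6_fragment): the delivery time and its clock flag are captured once before the fragment loop and re-stamped onto each fragment before output. Schematically (a condensed sketch of the pattern, not code from the patch; the loop body is elided):

	bool mono_delivery_time = skb->mono_delivery_time;	/* capture the clock flag */
	ktime_t tstamp = skb->tstamp;				/* capture the delivery time */

	/* ...for each fragment produced... */
	skb_set_delivery_time(skb, tstamp, mono_delivery_time);
	err = output(net, sk, data, skb);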

View File

@@ -2197,7 +2197,7 @@ EXPORT_SYMBOL(net_disable_timestamp);
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
 	skb->tstamp = 0;
-	skb->tstamp_type = SKB_CLOCK_REALTIME;
+	skb->mono_delivery_time = 0;
 	if (static_branch_unlikely(&netstamp_needed_key))
 		skb->tstamp = ktime_get_real();
 }

View File

@@ -7752,13 +7752,13 @@ BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb,
 		if (!tstamp)
 			return -EINVAL;
 		skb->tstamp = tstamp;
-		skb->tstamp_type = SKB_CLOCK_MONOTONIC;
+		skb->mono_delivery_time = 1;
 		break;
 	case BPF_SKB_TSTAMP_UNSPEC:
 		if (tstamp)
 			return -EINVAL;
 		skb->tstamp = 0;
-		skb->tstamp_type = SKB_CLOCK_REALTIME;
+		skb->mono_delivery_time = 0;
 		break;
 	default:
 		return -EINVAL;
@@ -9461,7 +9461,7 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog,
 				      TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK);
 		*insn++ = BPF_JMP32_IMM(BPF_JNE, tmp_reg,
 					TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK, 2);
-		/* skb->tc_at_ingress && skb->tstamp_type,
+		/* skb->tc_at_ingress && skb->mono_delivery_time,
 		 * read 0 as the (rcv) timestamp.
 		 */
 		*insn++ = BPF_MOV64_IMM(value_reg, 0);
@@ -9486,7 +9486,7 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
 	 * the bpf prog is aware the tstamp could have delivery time.
 	 * Thus, write skb->tstamp as is if tstamp_type_access is true.
 	 * Otherwise, writing at ingress will have to clear the
-	 * skb->tstamp_type bit also.
+	 * mono_delivery_time bit also.
 	 */
 	if (!prog->tstamp_type_access) {
 		__u8 tmp_reg = BPF_REG_AX;
@@ -9496,7 +9496,7 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
 		*insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1);
 		/* goto <store> */
 		*insn++ = BPF_JMP_A(2);
-		/* <clear>: skb->tstamp_type */
+		/* <clear>: mono_delivery_time */
 		*insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK);
 		*insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, SKB_BF_MONO_TC_OFFSET);
 	}
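The instruction sequences in bpf_convert_tstamp_read()/_write() above boil down to the following C over the flags byte at SKB_BF_MONO_TC_OFFSET; this is a hedged paraphrase of the emitted BPF, not code from the patch:

/* read path: a mono delivery time read at tc ingress yields 0 */
u8 flags = *((u8 *)skb + SKB_BF_MONO_TC_OFFSET);
if ((flags & (TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK)) ==
    (TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK))
	value = 0;
else
	value = skb->tstamp;

/* write path, when the prog lacks tstamp_type_access: a write at
 * ingress must also clear the mono_delivery_time bit before the
 * <store> of the new tstamp.
 */
if (flags & TC_AT_INGRESS_MASK)
	*((u8 *)skb + SKB_BF_MONO_TC_OFFSET) = flags & ~SKB_MONO_DELIVERY_TIME_MASK;
skb->tstamp = value;	/* the <store> target */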

View File

@@ -130,7 +130,7 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
 		goto err;
 
 	fq->q.stamp = skb->tstamp;
-	fq->q.tstamp_type = skb->tstamp_type;
+	fq->q.mono_delivery_time = skb->mono_delivery_time;
 	if (frag_type == LOWPAN_DISPATCH_FRAG1)
 		fq->q.flags |= INET_FRAG_FIRST_IN;

View File

@@ -619,7 +619,7 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
 	skb_mark_not_on_list(head);
 	head->prev = NULL;
 	head->tstamp = q->stamp;
-	head->tstamp_type = q->tstamp_type;
+	head->mono_delivery_time = q->mono_delivery_time;
 
 	if (sk)
 		refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc);

View File

@@ -360,7 +360,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		qp->iif = dev->ifindex;
 
 	qp->q.stamp = skb->tstamp;
-	qp->q.tstamp_type = skb->tstamp_type;
+	qp->q.mono_delivery_time = skb->mono_delivery_time;
 	qp->q.meat += skb->len;
 	qp->ecn |= ecn;
 	add_frag_mem_limit(qp->q.fqdir, skb->truesize);

View File

@@ -764,7 +764,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 {
 	struct iphdr *iph;
 	struct sk_buff *skb2;
-	u8 tstamp_type = skb->tstamp_type;
+	bool mono_delivery_time = skb->mono_delivery_time;
 	struct rtable *rt = skb_rtable(skb);
 	unsigned int mtu, hlen, ll_rs;
 	struct ip_fraglist_iter iter;
@@ -856,7 +856,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 				}
 			}
 
-			skb_set_delivery_time(skb, tstamp, tstamp_type);
+			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
 			err = output(net, sk, skb);
 
 			if (!err)
@@ -912,7 +912,7 @@ slow_path:
 		/*
 		 *	Put this fragment into the sending queue.
 		 */
-		skb_set_delivery_time(skb2, tstamp, tstamp_type);
+		skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
 		err = output(net, sk, skb2);
 		if (err)
 			goto fail;
@@ -1648,8 +1648,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
 								arg->csum));
 		nskb->ip_summed = CHECKSUM_NONE;
-		if (transmit_time)
-			nskb->tstamp_type = SKB_CLOCK_MONOTONIC;
+		nskb->mono_delivery_time = !!transmit_time;
 		if (txhash)
 			skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4);
 		ip_push_pending_frames(sk, &fl4);

View File

@@ -1272,7 +1272,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
 	tp = tcp_sk(sk);
 	prior_wstamp = tp->tcp_wstamp_ns;
 	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
-	skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
+	skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
 	if (clone_it) {
 		oskb = skb;
 
@@ -1613,7 +1613,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 
 	skb_split(skb, buff, len);
 
-	skb_set_delivery_time(buff, skb->tstamp, SKB_CLOCK_MONOTONIC);
+	skb_set_delivery_time(buff, skb->tstamp, true);
 	tcp_fragment_tstamp(skb, buff);
 
 	old_factor = tcp_skb_pcount(skb);
@@ -2709,7 +2709,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
 			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
 			tp->tcp_wstamp_ns = tp->tcp_clock_cache;
-			skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
+			skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
 			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
 			tcp_init_tso_segs(skb, mss_now);
 			goto repair; /* Skip network transmission */
@@ -3695,11 +3695,11 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 #ifdef CONFIG_SYN_COOKIES
 	if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
 		skb_set_delivery_time(skb, cookie_init_timestamp(req, now),
-				      SKB_CLOCK_MONOTONIC);
+				      true);
 	else
 #endif
 	{
-		skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
+		skb_set_delivery_time(skb, now, true);
 		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
 			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
 	}
@@ -3748,7 +3748,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
 				synack_type, &opts);
 
-	skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
+	skb_set_delivery_time(skb, now, true);
 	tcp_add_tx_delay(skb, tp);
 
 	return skb;
@@ -3930,7 +3930,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 
 	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
 
-	skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_CLOCK_MONOTONIC);
+	skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true);
 
 	/* Now full SYN+DATA was cloned and sent (or not),
 	 * remove the SYN from the original skb (syn_data)

View File

@@ -864,7 +864,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 	struct rt6_info *rt = dst_rt6_info(skb_dst(skb));
 	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
 				inet6_sk(skb->sk) : NULL;
-	u8 tstamp_type = skb->tstamp_type;
+	bool mono_delivery_time = skb->mono_delivery_time;
 	struct ip6_frag_state state;
 	unsigned int mtu, hlen, nexthdr_offset;
 	ktime_t tstamp = skb->tstamp;
@@ -958,7 +958,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 			if (iter.frag)
 				ip6_fraglist_prepare(skb, &iter);
 
-			skb_set_delivery_time(skb, tstamp, tstamp_type);
+			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
 			err = output(net, sk, skb);
 			if (!err)
 				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
@@ -1019,7 +1019,7 @@ slow_path:
 		/*
 		 *	Put this fragment into the sending queue.
 		 */
-		skb_set_delivery_time(frag, tstamp, tstamp_type);
+		skb_set_delivery_time(frag, tstamp, mono_delivery_time);
 		err = output(net, sk, frag);
 		if (err)
 			goto fail;

View File

@@ -126,7 +126,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 			       struct sk_buff *))
 {
 	int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
-	u8 tstamp_type = skb->tstamp_type;
+	bool mono_delivery_time = skb->mono_delivery_time;
 	ktime_t tstamp = skb->tstamp;
 	struct ip6_frag_state state;
 	u8 *prevhdr, nexthdr = 0;
@@ -192,7 +192,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
 			if (iter.frag)
 				ip6_fraglist_prepare(skb, &iter);
 
-			skb_set_delivery_time(skb, tstamp, tstamp_type);
+			skb_set_delivery_time(skb, tstamp, mono_delivery_time);
 			err = output(net, sk, data, skb);
 			if (err || !iter.frag)
 				break;
@@ -225,7 +225,7 @@ slow_path:
 			goto blackhole;
 		}
 
-		skb_set_delivery_time(skb2, tstamp, tstamp_type);
+		skb_set_delivery_time(skb2, tstamp, mono_delivery_time);
 		err = output(net, sk, data, skb2);
 		if (err)
 			goto blackhole;

View File

@@ -268,7 +268,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
 		fq->iif = dev->ifindex;
 
 	fq->q.stamp = skb->tstamp;
-	fq->q.tstamp_type = skb->tstamp_type;
+	fq->q.mono_delivery_time = skb->mono_delivery_time;
 	fq->q.meat += skb->len;
 	fq->ecn |= ecn;
 	if (payload_len > fq->q.max_size)

View File

@@ -198,7 +198,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 		fq->iif = dev->ifindex;
 
 	fq->q.stamp = skb->tstamp;
-	fq->q.tstamp_type = skb->tstamp_type;
+	fq->q.mono_delivery_time = skb->mono_delivery_time;
 	fq->q.meat += skb->len;
 	fq->ecn |= ecn;
 	add_frag_mem_limit(fq->q.fqdir, skb->truesize);

View File

@@ -937,7 +937,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 			mark = inet_twsk(sk)->tw_mark;
 		else
 			mark = READ_ONCE(sk->sk_mark);
-		skb_set_delivery_time(buff, tcp_transmit_time(sk), SKB_CLOCK_MONOTONIC);
+		skb_set_delivery_time(buff, tcp_transmit_time(sk), true);
 	}
 	if (txhash) {
 		/* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */

View File

@@ -54,8 +54,8 @@ TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb,
 		bpf_compute_data_pointers(skb);
 		filter_res = bpf_prog_run(filter, skb);
 	}
-	if (unlikely(!skb->tstamp && skb->tstamp_type))
-		skb->tstamp_type = SKB_CLOCK_REALTIME;
+	if (unlikely(!skb->tstamp && skb->mono_delivery_time))
+		skb->mono_delivery_time = 0;
 
 	if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK)
 		skb_orphan(skb);

View File

@@ -104,8 +104,8 @@ TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
 		bpf_compute_data_pointers(skb);
 		filter_res = bpf_prog_run(prog->filter, skb);
 	}
-	if (unlikely(!skb->tstamp && skb->tstamp_type))
-		skb->tstamp_type = SKB_CLOCK_REALTIME;
+	if (unlikely(!skb->tstamp && skb->mono_delivery_time))
+		skb->mono_delivery_time = 0;
 
 	if (prog->exts_integrated) {
 		res->class = 0;
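Both tc entry points above end with the same consistency fix-up after a BPF program has run: if the program zeroed skb->tstamp while the delivery-time flag was still set, the flag is cleared as well, so the skb_tstamp*() helpers in skbuff.h never see a set flag paired with a zero timestamp. Restated as an invariant (a paraphrase of the identical hunks in act_bpf.c and cls_bpf.c):

/* After a tc BPF prog may have written skb->tstamp:
 * skb->tstamp == 0 must imply mono_delivery_time == 0.
 */
if (unlikely(!skb->tstamp && skb->mono_delivery_time))
	skb->mono_delivery_time = 0;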