FROMGIT: vhost/vsock: Allocate nonlinear SKBs for handling large receive buffers

When receiving a packet from a guest, vhost_vsock_handle_tx_kick()
calls vhost_vsock_alloc_skb() to allocate and fill an SKB with
the receive data. Unfortunately, these are always linear allocations and
can therefore result in significant pressure on kmalloc() considering
that the maximum packet size (VIRTIO_VSOCK_MAX_PKT_BUF_SIZE +
VIRTIO_VSOCK_SKB_HEADROOM) is a little over 64KiB, resulting in a 128KiB
allocation for each packet.
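
For a sense of the arithmetic, here is a minimal userspace sketch. The
constants mirror the kernel's usual values but are hard-coded
assumptions here, as is the power-of-two rounding used to model
kmalloc()'s slab selection:

/*
 * Illustrative only: models why a "little over 64KiB" request ends up
 * as a 128KiB allocation. Constants are assumed, not taken from kernel
 * headers.
 */
#include <stdio.h>

#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE	(64 * 1024)	/* 64KiB payload cap */
#define VIRTIO_VSOCK_SKB_HEADROOM	44	/* ~sizeof(struct virtio_vsock_hdr), assumed */

/* kmalloc() serves requests from power-of-two slabs, so round up. */
static unsigned long kmalloc_slab_size(unsigned long size)
{
	unsigned long slab = 1;

	while (slab < size)
		slab <<= 1;
	return slab;
}

int main(void)
{
	unsigned long len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE +
			    VIRTIO_VSOCK_SKB_HEADROOM;

	printf("max packet size:   %lu bytes\n", len);
	printf("linear allocation: %lu bytes (%lu KiB)\n",
	       kmalloc_slab_size(len), kmalloc_slab_size(len) / 1024);
	return 0;
}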

Rework the vsock SKB allocation so that, for sizes with page order
greater than PAGE_ALLOC_COSTLY_ORDER, a nonlinear SKB is allocated
instead with the packet header in the SKB and the receive data in the
fragments. Finally, add a debug warning if virtio_vsock_skb_rx_put() is
ever called on an SKB with a non-zero length, as this would be
destructive for the nonlinear case.
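
As a rough model of the resulting policy, the sketch below assumes 4KiB
pages, PAGE_ALLOC_COSTLY_ORDER == 3 and an approximate per-SKB overhead
standing in for SKB_WITH_OVERHEAD(): requests that still fit in an
order-3 (32KiB) allocation after that overhead stay linear, anything
larger keeps only the packet header in the linear area and places the
data in fragments:

/*
 * Illustrative only: PAGE_SIZE, PAGE_ALLOC_COSTLY_ORDER and the per-SKB
 * overhead are assumed values, not taken from kernel headers.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_ALLOC_COSTLY_ORDER	3
#define SKB_OVERHEAD		320UL	/* rough stand-in for skb_shared_info */
#define VSOCK_SKB_HEADROOM	44UL	/* packet header kept in the linear area */

/* Approximation of SKB_WITH_OVERHEAD(): usable bytes in an allocation. */
static unsigned long skb_with_overhead(unsigned long alloc)
{
	return alloc - SKB_OVERHEAD;
}

/* Mirrors the size check added to virtio_vsock_alloc_skb() below. */
static bool use_linear_skb(unsigned long size)
{
	return size <= skb_with_overhead(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
}

int main(void)
{
	unsigned long sizes[] = { 4096, 16384, 32768, 65580 };

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long size = sizes[i];

		if (use_linear_skb(size))
			printf("%6lu bytes -> linear SKB\n", size);
		else
			printf("%6lu bytes -> %lu-byte linear header + %lu bytes in frags\n",
			       size, VSOCK_SKB_HEADROOM,
			       size - VSOCK_SKB_HEADROOM);
	}
	return 0;
}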

Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
Signed-off-by: Will Deacon <will@kernel.org>
Message-Id: <20250717090116.11987-8-will@kernel.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit 4243469b093ac35f54c3a0414812a5b97313e149
 git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git vhost)
Bug: 421244320
Signed-off-by: Will Deacon <willdeacon@google.com>
Change-Id: I4212a8daf9f19b5bbffc06ce93338c823de7bb19

--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c

@@ -349,7 +349,7 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
 		return NULL;
 
 	/* len contains both payload and hdr */
-	skb = virtio_vsock_alloc_linear_skb(len, GFP_KERNEL);
+	skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
 	if (!skb)
 		return NULL;
 
@@ -378,10 +378,8 @@ vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
 
 	virtio_vsock_skb_rx_put(skb, payload_len);
 
-	nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
-	if (nbytes != payload_len) {
-		vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
-		       payload_len, nbytes);
+	if (skb_copy_datagram_from_iter(skb, 0, &iov_iter, payload_len)) {
+		vq_err(vq, "Failed to copy %zu byte payload\n", payload_len);
 		kfree_skb(skb);
 		return NULL;
 	}
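
Note that the hunk above also switches the payload copy from
copy_from_iter() into skb->data over to skb_copy_datagram_from_iter(),
since only the latter also fills paged fragments once the SKB is
nonlinear. A toy userspace model of the difference (the types and
helpers here are illustrative stand-ins, not kernel APIs):

#include <stdio.h>
#include <string.h>

/* Toy SKB: a small linear head plus one "fragment" buffer. */
struct toy_skb {
	char head[64];
	size_t head_len;
	char frag[256];
	size_t frag_len;
};

/* Models copy_from_iter(skb->data, ...): fills only the linear head. */
static size_t copy_into_head(struct toy_skb *skb, const char *src, size_t len)
{
	size_t n = len < sizeof(skb->head) ? len : sizeof(skb->head);

	memcpy(skb->head, src, n);
	skb->head_len = n;
	return n;	/* short copy if the payload does not fit */
}

/* Models skb_copy_datagram_from_iter(): spills the rest into the fragment. */
static size_t copy_into_head_and_frags(struct toy_skb *skb, const char *src,
				       size_t len)
{
	size_t n = copy_into_head(skb, src, len);

	if (n < len) {
		size_t rest = len - n;

		if (rest > sizeof(skb->frag))
			rest = sizeof(skb->frag);
		memcpy(skb->frag, src + n, rest);
		skb->frag_len = rest;
		n += rest;
	}
	return n;
}

int main(void)
{
	static const char payload[200];
	struct toy_skb a = { 0 }, b = { 0 };

	printf("head-only copy:    %zu of %zu bytes\n",
	       copy_into_head(&a, payload, sizeof(payload)), sizeof(payload));
	printf("head + frags copy: %zu of %zu bytes\n",
	       copy_into_head_and_frags(&b, payload, sizeof(payload)),
	       sizeof(payload));
	return 0;
}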

--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h

@@ -49,20 +49,46 @@ static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb)
 
 static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb, u32 len)
 {
-	skb_put(skb, len);
+	DEBUG_NET_WARN_ON_ONCE(skb->len);
+
+	if (skb_is_nonlinear(skb))
+		skb->len = len;
+	else
+		skb_put(skb, len);
 }
 
 static inline struct sk_buff *
+__virtio_vsock_alloc_skb_with_frags(unsigned int header_len,
+				    unsigned int data_len,
+				    gfp_t mask)
+{
+	struct sk_buff *skb;
+	int err;
+
+	skb = alloc_skb_with_frags(header_len, data_len,
+				   PAGE_ALLOC_COSTLY_ORDER, &err, mask);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
+	skb->data_len = data_len;
+	return skb;
+}
+
+static inline struct sk_buff *
 virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask)
 {
-	struct sk_buff *skb;
+	return __virtio_vsock_alloc_skb_with_frags(size, 0, mask);
+}
 
-	skb = alloc_skb(size, mask);
-	if (!skb)
-		return NULL;
+static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask)
+{
+	if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+		return virtio_vsock_alloc_linear_skb(size, mask);
 
-	skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM);
-	return skb;
+	size -= VIRTIO_VSOCK_SKB_HEADROOM;
+	return __virtio_vsock_alloc_skb_with_frags(VIRTIO_VSOCK_SKB_HEADROOM,
+						   size, mask);
 }
 
 static inline void