| author | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2018-06-22 21:20:35 +0200 | 
| commit | 7731b8bc94e599c9a79e428f3359ff2c34b7576a (patch) | |
| tree | 879f18ccbe274122f2d4f095b43cbc7f953e0ada /drivers/vhost/net.c | |
| parent | 48e315618dc4dc8904182cd221e3d395d5d97005 (diff) | |
| parent | 9ffc59d57228d74809700be6f7ecb1db10292f05 (diff) | |
Merge branch 'linus' into x86/urgent
Required to queue a dependent fix.
Diffstat (limited to 'drivers/vhost/net.c')
| -rw-r--r-- | drivers/vhost/net.c | 27 |
|---|---|---|

1 file changed, 17 insertions(+), 10 deletions(-)
```diff
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index eeaf6739215f..686dc670fd29 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -32,6 +32,7 @@
 #include <linux/skbuff.h>
 
 #include <net/sock.h>
+#include <net/xdp.h>
 
 #include "vhost.h"
 
@@ -45,8 +46,10 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
 #define VHOST_NET_WEIGHT 0x80000
 
 /* Max number of packets transferred before requeueing the job.
- * Using this limit prevents one virtqueue from starving rx. */
-#define VHOST_NET_PKT_WEIGHT(vq) ((vq)->num * 2)
+ * Using this limit prevents one virtqueue from starving others with small
+ * pkts.
+ */
+#define VHOST_NET_PKT_WEIGHT 256
 
 /* MAX number of TX used buffers for outstanding zerocopy */
 #define VHOST_MAX_PEND 128
@@ -183,10 +186,10 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
 
 static int vhost_net_buf_peek_len(void *ptr)
 {
-	if (tun_is_xdp_buff(ptr)) {
-		struct xdp_buff *xdp = tun_ptr_to_xdp(ptr);
+	if (tun_is_xdp_frame(ptr)) {
+		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 
-		return xdp->data_end - xdp->data;
+		return xdpf->len;
 	}
 
 	return __skb_array_len_with_tag(ptr);
@@ -271,8 +274,10 @@ static int vhost_net_set_ubuf_info(struct vhost_net *n)
 		zcopy = vhost_net_zcopy_mask & (0x1 << i);
 		if (!zcopy)
 			continue;
-		n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
-					      UIO_MAXIOV, GFP_KERNEL);
+		n->vqs[i].ubuf_info =
+			kmalloc_array(UIO_MAXIOV,
+				      sizeof(*n->vqs[i].ubuf_info),
+				      GFP_KERNEL);
 		if  (!n->vqs[i].ubuf_info)
 			goto err;
 	}
@@ -588,7 +593,7 @@ static void handle_tx(struct vhost_net *net)
 			vhost_zerocopy_signal_used(net, vq);
 		vhost_net_tx_packet(net);
 		if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
-		    unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT(vq))) {
+		    unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT)) {
 			vhost_poll_queue(&vq->poll);
 			break;
 		}
@@ -784,6 +789,7 @@ static void handle_rx(struct vhost_net *net)
 	struct socket *sock;
 	struct iov_iter fixup;
 	__virtio16 num_buffers;
+	int recv_pkts = 0;
 
 	mutex_lock_nested(&vq->mutex, 0);
 	sock = vq->private_data;
@@ -884,7 +890,8 @@ static void handle_rx(struct vhost_net *net)
 		if (unlikely(vq_log))
 			vhost_log_write(vq, vq_log, log, vhost_len);
 		total_len += vhost_len;
-		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+		if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
+		    unlikely(++recv_pkts >= VHOST_NET_PKT_WEIGHT)) {
 			vhost_poll_queue(&vq->poll);
 			goto out;
 		}
@@ -938,7 +945,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
 	if (!n)
 		return -ENOMEM;
-	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
+	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
 	if (!vqs) {
 		kvfree(n);
 		return -ENOMEM;
```
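For readers skimming the diff, the substantive vhost_net change is the packet-count budget: `handle_tx()` and now also `handle_rx()` stop after either a byte quota (`VHOST_NET_WEIGHT`) or a fixed packet quota (`VHOST_NET_PKT_WEIGHT`), so a flood of tiny packets on one virtqueue can no longer monopolize the handler. The following is a minimal userspace C sketch of that dual-budget pattern, not the kernel code itself; `pop_packet()` and `requeue_work()` are hypothetical stand-ins for the virtqueue machinery and `vhost_poll_queue()`.

```c
#include <stdio.h>
#include <stddef.h>

#define BYTE_WEIGHT 0x80000 /* mirrors VHOST_NET_WEIGHT (byte budget per run) */
#define PKT_WEIGHT  256     /* mirrors the new fixed VHOST_NET_PKT_WEIGHT     */

/* Hypothetical stand-in for pulling one packet off a virtqueue; returns
 * the packet length, or 0 when the queue is empty. Here it fakes an
 * endless stream of 64-byte packets. */
static size_t pop_packet(void)
{
	return 64;
}

/* Hypothetical stand-in for vhost_poll_queue(): requeue the handler so
 * other virtqueues get a turn on this CPU. */
static void requeue_work(void)
{
	puts("budget exhausted; requeueing handler");
}

static void service_queue(void)
{
	size_t total_len = 0;
	size_t len;
	int pkts = 0;

	while ((len = pop_packet()) != 0) {
		total_len += len;
		/* Bail out on either budget: large packets trip the byte
		 * quota, while floods of tiny packets trip the packet
		 * quota, which is what the diff's comment means by
		 * "starving others with small pkts". */
		if (total_len >= BYTE_WEIGHT || ++pkts >= PKT_WEIGHT) {
			requeue_work();
			break;
		}
	}
}

int main(void)
{
	service_queue(); /* with 64 B packets, the packet quota trips first */
	return 0;
}
```

The `kmalloc()` to `kmalloc_array()` conversions in the same diff are a separate hardening cleanup: `kmalloc_array()` checks the element-count multiplication for overflow instead of silently allocating a truncated buffer.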
