author     Amos Kong <akong@redhat.com>            2011-06-09 00:27:10 -0700
committer  David S. Miller <davem@davemloft.net>   2011-06-09 00:27:10 -0700
commit     61a5ff15ebdab87887861a6b128b108404e4706d
tree       e439d62aa7299ad6644b37b3e569ca3de8c0f60b
parent     6f7c156c08d5eaa9fff2bd062f0a2b9d09a1e7a9
tun: do not put self in waitq if doing a nonblock read
Perf shows a relatively high contention rate (about 8%) in spin_lock_irqsave() when running netperf between an external host and a guest. It is mainly because of lock contention between tun_do_read() and tun_xmit_skb(), so this patch does not put the reader on the wait queue when doing a nonblocking read, which reduces that contention. After this patch, the rate drops to 4%.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Amos Kong <akong@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/tun.c')
 drivers/net/tun.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2829badbae38..ef68e13c042d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -817,7 +817,8 @@ static ssize_t tun_do_read(struct tun_struct *tun,
 
 	tun_debug(KERN_INFO, tun, "tun_chr_read\n");
 
-	add_wait_queue(&tun->wq.wait, &wait);
+	if (unlikely(!noblock))
+		add_wait_queue(&tun->wq.wait, &wait);
 	while (len) {
 		current->state = TASK_INTERRUPTIBLE;
 
@@ -848,7 +849,8 @@ static ssize_t tun_do_read(struct tun_struct *tun,
 	}
 
 	current->state = TASK_RUNNING;
-	remove_wait_queue(&tun->wq.wait, &wait);
+	if (unlikely(!noblock))
+		remove_wait_queue(&tun->wq.wait, &wait);
 
 	return ret;
 }
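
For readers unfamiliar with the pattern, the sketch below is a minimal userspace analogue of the idea, not kernel code: in nonblocking mode the consumer never registers with the wait machinery at all, so it never touches the lock that protects it. All names here (struct pktq, pktq_recv, and so on) are hypothetical and exist only for illustration; the kernel patch itself operates on tun->wq.wait via add_wait_queue()/remove_wait_queue().

/* Userspace sketch (hypothetical names, illustration only): a
 * nonblocking receive never touches the condition variable, just as
 * the patched tun_do_read() skips add_wait_queue()/remove_wait_queue()
 * for nonblocking reads. Build with -lpthread. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct pktq {
	pthread_mutex_t lock;     /* protects 'queued' */
	pthread_cond_t  nonempty; /* stand-in for the wait queue */
	int             queued;   /* stand-in for queued skbs */
};

/* Take one packet; returns 0 on success, -EAGAIN if noblock and empty. */
static int pktq_recv(struct pktq *q, bool noblock)
{
	pthread_mutex_lock(&q->lock);
	while (q->queued == 0) {
		if (noblock) {
			/* Fast path: no sleeper registration at all. */
			pthread_mutex_unlock(&q->lock);
			return -EAGAIN;
		}
		/* Slow path: sleep until a producer signals. */
		pthread_cond_wait(&q->nonempty, &q->lock);
	}
	q->queued--;
	pthread_mutex_unlock(&q->lock);
	return 0;
}

A producer would increment 'queued' and call pthread_cond_signal(&q->nonempty) under the lock. The kernel case differs in the details (the transmit path's wake_up() takes the wait queue's spinlock even when no one is sleeping, which is exactly the contention the patch reduces), but the shape is the same: the nonblocking reader stays off the wait queue entirely.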