author     J. Bruce Fields <bfields@citi.umich.edu>  2009-08-06 15:41:34 -0400
committer  J. Bruce Fields <bfields@citi.umich.edu>  2009-11-23 12:34:05 -0500
commit     78c210efdefe07131f91ed512a3308b15bb14e2f (patch)
tree       8b101c92584e27b0dbe238f6a1208308e54dac81 /net/sunrpc
parent     0a3adadee42f2865bb867b8c5f4955b7def9baad (diff)
Revert "knfsd: avoid overloading the CPU scheduler with enormous load averages"
This reverts commit 59a252ff8c0f2fa32c896f69d56ae33e641ce7ad.

This helps in an entirely cached workload but not necessarily in
workloads that require waiting on disk.

Conflicts:

	include/linux/sunrpc/svc.h
	net/sunrpc/svc_xprt.c

Reported-by: Simon Kirby <sim@hostway.ca>
Tested-by: Jesper Krogh <jesper@krogh.cc>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
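For readers skimming the hunks below: the reverted commit only handed work to a
sleeping nfsd thread when fewer than SVC_MAX_WAKING threads were already in the
process of waking, bumping the overloads_avoided counter otherwise; after this
revert, any sleeping thread is woken immediately. The following is a minimal
userspace sketch of that policy difference, not kernel code: struct pool and the
two helper functions are illustrative stand-ins for svc_pool and the open-coded
check removed from svc_xprt_enqueue().

#include <stdbool.h>
#include <stdio.h>

#define SVC_MAX_WAKING 5	/* cap removed by this revert */

struct pool {
	int sleeping_threads;		/* stand-in for !list_empty(&pool->sp_threads) */
	int nwaking;			/* stand-in for pool->sp_nwaking */
	unsigned long overloads_avoided;
};

/* Pre-revert policy: wake a thread only if not too many are already waking. */
static bool thread_available_capped(struct pool *p)
{
	bool avail = p->sleeping_threads > 0;

	if (p->nwaking >= SVC_MAX_WAKING) {
		avail = false;
		p->overloads_avoided++;
	}
	return avail;
}

/* Post-revert policy: any sleeping thread may be woken immediately. */
static bool thread_available_uncapped(const struct pool *p)
{
	return p->sleeping_threads > 0;
}

int main(void)
{
	struct pool p = { .sleeping_threads = 8, .nwaking = 6, .overloads_avoided = 0 };

	/* With 6 threads already waking, the capped policy throttles the wake-up. */
	printf("capped policy wakes a thread:   %s\n",
	       thread_available_capped(&p) ? "yes" : "no");
	printf("uncapped policy wakes a thread: %s\n",
	       thread_available_uncapped(&p) ? "yes" : "no");
	printf("overloads_avoided counted: %lu\n", p.overloads_avoided);
	return 0;
}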
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/svc_xprt.c | 31
1 file changed, 9 insertions(+), 22 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index df124f78ee4..2c58b75a236 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -16,8 +16,6 @@
#define RPCDBG_FACILITY RPCDBG_SVCXPRT
-#define SVC_MAX_WAKING 5
-
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);
@@ -306,7 +304,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
struct svc_pool *pool;
struct svc_rqst *rqstp;
int cpu;
- int thread_avail;
if (!(xprt->xpt_flags &
((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
@@ -318,6 +315,12 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
spin_lock_bh(&pool->sp_lock);
+ if (!list_empty(&pool->sp_threads) &&
+ !list_empty(&pool->sp_sockets))
+ printk(KERN_ERR
+ "svc_xprt_enqueue: "
+ "threads and transports both waiting??\n");
+
if (test_bit(XPT_DEAD, &xprt->xpt_flags)) {
/* Don't enqueue dead transports */
dprintk("svc: transport %p is dead, not enqueued\n", xprt);
@@ -358,15 +361,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
}
process:
- /* Work out whether threads are available */
- thread_avail = !list_empty(&pool->sp_threads); /* threads are asleep */
- if (pool->sp_nwaking >= SVC_MAX_WAKING) {
- /* too many threads are runnable and trying to wake up */
- thread_avail = 0;
- pool->sp_stats.overloads_avoided++;
- }
-
- if (thread_avail) {
+ if (!list_empty(&pool->sp_threads)) {
rqstp = list_entry(pool->sp_threads.next,
struct svc_rqst,
rq_list);
@@ -381,8 +376,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
svc_xprt_get(xprt);
rqstp->rq_reserved = serv->sv_max_mesg;
atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
- rqstp->rq_waking = 1;
- pool->sp_nwaking++;
pool->sp_stats.threads_woken++;
BUG_ON(xprt->xpt_pool != pool);
wake_up(&rqstp->rq_wait);
@@ -651,11 +644,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
return -EINTR;
spin_lock_bh(&pool->sp_lock);
- if (rqstp->rq_waking) {
- rqstp->rq_waking = 0;
- pool->sp_nwaking--;
- BUG_ON(pool->sp_nwaking < 0);
- }
xprt = svc_xprt_dequeue(pool);
if (xprt) {
rqstp->rq_xprt = xprt;
@@ -1204,16 +1192,15 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
struct svc_pool *pool = p;
if (p == SEQ_START_TOKEN) {
- seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken overloads-avoided threads-timedout\n");
+ seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n");
return 0;
}
- seq_printf(m, "%u %lu %lu %lu %lu %lu\n",
+ seq_printf(m, "%u %lu %lu %lu %lu\n",
pool->sp_id,
pool->sp_stats.packets,
pool->sp_stats.sockets_queued,
pool->sp_stats.threads_woken,
- pool->sp_stats.overloads_avoided,
pool->sp_stats.threads_timedout);
return 0;