path: root/kernel/rcutree_trace.c
diff options
author: Michael Wang <wangyun@linux.vnet.ibm.com> 2012-09-20 08:51:03 +0800
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com> 2012-11-08 11:50:15 -0800
commit: 374b928ee8061fdbb0b527fb3924080ba2437767 (patch)
tree: 7e1cc1bd9b8747e87413585ef1cb79196a550b59 /kernel/rcutree_trace.c
parent: 573bcd40d221bd6d7cebf27dee120bd242f5feb5 (diff)
rcu: Fundamental facility for 'CPU units sequence reading'
This patch add the fundamental facility used by the following patches, so we can implement the 'CPU units sequence reading' later. This helps us avoid losing data when there are too many CPUs and too small of a buffer, since this new approach allows userspace to read out the data one CPU at a time. Thus, if the buffer is not large enough, userspace will get whatever CPUs fit, and can then issue another read for the remainder of the data. Signed-off-by: Michael Wang <wangyun@linux.vnet.ibm.com> Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel/rcutree_trace.c')
1 file changed, 30 insertions, 0 deletions
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 62223a27f98..0dfe9b512f0 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -46,6 +46,36 @@
#include "rcutree.h"
+static int r_open(struct inode *inode, struct file *file,
+ const struct seq_operations *op)
+ int ret = seq_open(file, op);
+ if (!ret) {
+ struct seq_file *m = (struct seq_file *)file->private_data;
+ m->private = inode->i_private;
+ }
+ return ret;
+static void *r_start(struct seq_file *m, loff_t *pos)
+ struct rcu_state *rsp = (struct rcu_state *)m->private;
+ *pos = cpumask_next(*pos - 1, cpu_possible_mask);
+ if ((*pos) < nr_cpu_ids)
+ return per_cpu_ptr(rsp->rda, *pos);
+ return NULL;
+static void *r_next(struct seq_file *m, void *v, loff_t *pos)
+ (*pos)++;
+ return r_start(m, pos);
+static void r_stop(struct seq_file *m, void *v)
static int show_rcubarrier(struct seq_file *m, void *unused)
struct rcu_state *rsp;