Diffstat (limited to 'drivers/misc/ubench.c')
 drivers/misc/ubench.c | 246 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 246 insertions(+), 0 deletions(-)
diff --git a/drivers/misc/ubench.c b/drivers/misc/ubench.c
new file mode 100644
index 000000000000..89220a577759
--- /dev/null
+++ b/drivers/misc/ubench.c
@@ -0,0 +1,246 @@
+/*
+ * ubench.c - micro-benchmarks of IRQ masking and locking primitives
+ *
+ * The sole purpose of this module is to help with the development and
+ * debugging of system debug tools.
+ *
+ * Copyright (C) 2015 Linaro Limited
+ * Daniel Thompson <daniel.thompson@linaro.org>
+ */
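+
+/*
+ * Each benchmark is exposed as a file in debugfs (typically mounted at
+ * /sys/kernel/debug) under ubench/. Reading a file runs the benchmark
+ * on the current CPU and returns its result; writing a CPU number runs
+ * it on that CPU instead, e.g.:
+ *
+ *   echo 2 > /sys/kernel/debug/ubench/spin_lock_irq
+ *   cat /sys/kernel/debug/ubench/all
+ */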
+
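+/*
+ * Tag every message with the CPU that emitted it. Use the raw accessor
+ * because some benchmarks print from preemptible context.
+ */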
+#define pr_fmt(fmt) "ubench[%u]: " fmt, raw_smp_processor_id()
+
+#include <linux/debugfs.h>
+#include <linux/debug_locks.h>
+#include <linux/delay.h>
+#include <linux/irq_work.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
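+/*
+ * Run @action on @cpu via work_on_cpu(). A cpu of -1 means "run it
+ * synchronously right here, on whichever CPU we are currently on".
+ */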
+static int ubench_do_action_on_cpu(int cpu, long (*action)(void *))
+{
+	if (cpu == -1) {
+		(void) action(NULL);
+		return 0;
+	}
+
+	if (cpu < 0 || cpu >= num_possible_cpus())
+		return -EINVAL;
+
+ pr_info("About to run %pf on cpu %d\n", action, cpu);
+
+	/*
+	 * work_on_cpu() ends up performing an uninterruptible
+	 * wait-for-completion. This means we'll lose the prompt
+	 * whilst things run regardless of the CPU we send the
+	 * work to.
+	 */
+ work_on_cpu(cpu, action, NULL);
+
+ return 0;
+}
+
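+/*
+ * Generate the debugfs get/set handlers for a benchmark: reading the
+ * attribute runs the benchmark on the calling CPU and returns its
+ * result, writing a CPU number runs it on that CPU via the helper
+ * above.
+ */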
+#define DEFINE_UBENCH_ATTRIBUTE(__fops, __action) \
+ static int __fops##_get(void *data, u64 *val) \
+ { \
+ *val = __action(NULL); \
+ return 0; \
+ } \
+	static int __fops##_set(void *data, u64 val)			\
+	{								\
+		return ubench_do_action_on_cpu(val, __action);		\
+	}								\
+ DEFINE_SIMPLE_ATTRIBUTE(__fops, __fops##_get, __fops##_set, "%llu\n")
+
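+/*
+ * Each benchmark below runs one million lock/unlock (or mask/unmask)
+ * pairs and reports the total sched_clock() delta, i.e. roughly the
+ * cost in nanoseconds of a million round trips.
+ */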
+static long do_ubench_local_irq_disable(void *info)
+{
+ int i;
+ unsigned long long t = sched_clock();
+
+ for (i = 0; i < 1000000; i++) {
+ local_irq_disable();
+ local_irq_enable();
+ }
+
+ pr_info("local_irq_disable %llu\n", sched_clock() - t);
+ return 0;
+}
+
+DEFINE_UBENCH_ATTRIBUTE(ubench_local_irq_disable_fops,
+ do_ubench_local_irq_disable);
+
+static long do_ubench_local_irq_save(void *info)
+{
+ int i;
+ unsigned long flags;
+ unsigned long long t = sched_clock();
+
+ for (i = 0; i < 1000000; i++) {
+ local_irq_save(flags);
+ local_irq_restore(flags);
+ }
+
+ pr_info("local_irq_save %llu\n", sched_clock() - t);
+ return 0;
+}
+
+DEFINE_UBENCH_ATTRIBUTE(ubench_local_irq_save_fops, do_ubench_local_irq_save);
+
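+/* A dedicated, uncontended lock so only the fast path is measured. */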
+static DEFINE_SPINLOCK(ubench_lock);
+
+static long do_ubench_spin_lock_irq(void *info)
+{
+ int i;
+ unsigned long long t = sched_clock();
+
+ for (i = 0; i < 1000000; i++) {
+ spin_lock_irq(&ubench_lock);
+ spin_unlock_irq(&ubench_lock);
+ }
+
+ pr_info("spin_lock_irq %llu\n", sched_clock() - t);
+ return 0;
+}
+
+DEFINE_UBENCH_ATTRIBUTE(ubench_spin_lock_irq_fops, do_ubench_spin_lock_irq);
+
+static long do_ubench_spin_lock_irqsave(void *info)
+{
+ int i;
+ unsigned long flags;
+ unsigned long long t = sched_clock();
+
+ for (i = 0; i < 1000000; i++) {
+ spin_lock_irqsave(&ubench_lock, flags);
+ spin_unlock_irqrestore(&ubench_lock, flags);
+ }
+
+ pr_info("spin_lock_irqsave %llu\n", sched_clock() - t);
+ return 0;
+}
+
+DEFINE_UBENCH_ATTRIBUTE(ubench_spin_lock_irqsave_fops,
+ do_ubench_spin_lock_irqsave);
+
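+/* As above: a dedicated, uncontended rwlock for the read/write tests. */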
+static DEFINE_RWLOCK(ubench_rwlock);
+
+static long do_ubench_read_lock_irq(void *info)
+{
+ int i;
+ unsigned long long t = sched_clock();
+
+ for (i = 0; i < 1000000; i++) {
+ read_lock_irq(&ubench_rwlock);
+ read_unlock_irq(&ubench_rwlock);
+ }
+
+ pr_info("read_lock_irq %llu\n", sched_clock() - t);
+ return 0;
+}
+
+DEFINE_UBENCH_ATTRIBUTE(ubench_read_lock_irq_fops, do_ubench_read_lock_irq);
+
+static long do_ubench_read_lock_irqsave(void *info)
+{
+ int i;
+ unsigned long flags;
+ unsigned long long t = sched_clock();
+
+ for (i = 0; i < 1000000; i++) {
+ read_lock_irqsave(&ubench_rwlock, flags);
+ read_unlock_irqrestore(&ubench_rwlock, flags);
+ }
+
+ pr_info("read_lock_irqsave %llu\n", sched_clock() - t);
+ return 0;
+}
+
+DEFINE_UBENCH_ATTRIBUTE(ubench_read_lock_irqsave_fops,
+ do_ubench_read_lock_irqsave);
+
+static long do_ubench_write_lock_irq(void *info)
+{
+ int i;
+ unsigned long long t = sched_clock();
+
+ for (i = 0; i < 1000000; i++) {
+ write_lock_irq(&ubench_rwlock);
+ write_unlock_irq(&ubench_rwlock);
+ }
+
+ pr_info("write_lock_irq %llu\n", sched_clock() - t);
+ return 0;
+}
+
+DEFINE_UBENCH_ATTRIBUTE(ubench_write_lock_irq_fops, do_ubench_write_lock_irq);
+
+static long do_ubench_write_lock_irqsave(void *info)
+{
+ int i;
+ unsigned long flags;
+ unsigned long long t = sched_clock();
+
+ for (i = 0; i < 1000000; i++) {
+ write_lock_irqsave(&ubench_rwlock, flags);
+ write_unlock_irqrestore(&ubench_rwlock, flags);
+ }
+
+ pr_info("write_lock_irqsave %llu\n", sched_clock() - t);
+ return 0;
+}
+
+DEFINE_UBENCH_ATTRIBUTE(ubench_write_lock_irqsave_fops,
+ do_ubench_write_lock_irqsave);
+
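+/* Run every benchmark above, back to back, on the same CPU. */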
+static long do_ubench_all(void *info)
+{
+ do_ubench_local_irq_disable(NULL);
+ do_ubench_local_irq_save(NULL);
+ do_ubench_spin_lock_irq(NULL);
+ do_ubench_spin_lock_irqsave(NULL);
+ do_ubench_read_lock_irq(NULL);
+ do_ubench_read_lock_irqsave(NULL);
+ do_ubench_write_lock_irq(NULL);
+ do_ubench_write_lock_irqsave(NULL);
+
+ return 0;
+}
+
+DEFINE_UBENCH_ATTRIBUTE(ubench_all_fops, do_ubench_all);
+
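+/* Map each benchmark name to its fops; E() cuts down on repetition. */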
+#define E(x) { #x, &ubench_##x##_fops }
+static const struct {
+	const char *name;
+	const struct file_operations *fops;
+} ubench_fops_table[] = {
+ E(local_irq_disable),
+ E(local_irq_save),
+ E(spin_lock_irq),
+ E(spin_lock_irqsave),
+ E(read_lock_irq),
+ E(read_lock_irqsave),
+ E(write_lock_irq),
+ E(write_lock_irqsave),
+ E(all),
+};
+#undef E
+
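+/* Create the debugfs directory and populate it from the table above. */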
+static int __init ubench_init(void)
+{
+ struct dentry *dir;
+ unsigned int i;
+
+	dir = debugfs_create_dir("ubench", NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	for (i = 0; i < ARRAY_SIZE(ubench_fops_table); i++)
+		(void)debugfs_create_file(ubench_fops_table[i].name,
+					  S_IRUGO | S_IWUSR, dir, NULL,
+					  ubench_fops_table[i].fops);
+
+ return 0;
+}
+
+module_init(ubench_init);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Thompson");
+MODULE_DESCRIPTION("Micro-benchmarks of IRQ masking and locking primitives");