Diffstat (limited to 'fs/eventfd.c')
-rw-r--r--  fs/eventfd.c  228
1 files changed, 228 insertions, 0 deletions
diff --git a/fs/eventfd.c b/fs/eventfd.c
new file mode 100644
index 000000000000..480e2b3c4166
--- /dev/null
+++ b/fs/eventfd.c
@@ -0,0 +1,228 @@
+/*
+ * fs/eventfd.c
+ *
+ * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#include <linux/file.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/anon_inodes.h>
+#include <linux/eventfd.h>
+
+struct eventfd_ctx {
+ spinlock_t lock;
+ wait_queue_head_t wqh;
+ /*
+ * Every time that a write(2) is performed on an eventfd, the
+ * value of the __u64 being written is added to "count" and a
+ * wakeup is performed on "wqh". A read(2) will return the "count"
+ * value to userspace, and will reset "count" to zero. The kernel
+ * side eventfd_signal() also adds to the "count" counter and
+ * issues a wakeup.
+ */
+ __u64 count;
+};
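
The comment above spells out the userspace contract: every write(2) of a __u64 adds to "count" and wakes the queue, while a read(2) hands back the accumulated value and resets it to zero. A minimal userspace sketch of that round trip, assuming the new syscall is reachable via __NR_eventfd on the build architecture (no glibc wrapper exists at this point):

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        /* Create an eventfd with an initial count of 0; __NR_eventfd is
         * assumed to be defined for this architecture. */
        int efd = syscall(__NR_eventfd, 0);
        uint64_t v = 3, out;

        if (efd < 0)
                return 1;

        /* Two writes accumulate in the kernel-side counter: 3 + 4 = 7. */
        write(efd, &v, sizeof(v));
        v = 4;
        write(efd, &v, sizeof(v));

        /* One read returns the accumulated value and resets it to zero. */
        read(efd, &out, sizeof(out));
        printf("counter was %llu\n", (unsigned long long)out);

        close(efd);
        return 0;
}
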
+
+/*
+ * Adds "n" to the eventfd counter "count". Returns "n" in case of
+ * success, or a value lower than "n" in case of counter overflow.
+ * This function is supposed to be called by the kernel in paths
+ * that do not allow sleeping. In this function we allow the counter
+ * to reach the ULLONG_MAX value, and we signal this as an overflow
+ * condition by returning POLLERR to poll(2).
+ */
+int eventfd_signal(struct file *file, int n)
+{
+ struct eventfd_ctx *ctx = file->private_data;
+ unsigned long flags;
+
+ if (n < 0)
+ return -EINVAL;
+ spin_lock_irqsave(&ctx->lock, flags);
+ if (ULLONG_MAX - ctx->count < n)
+ n = (int) (ULLONG_MAX - ctx->count);
+ ctx->count += n;
+ if (waitqueue_active(&ctx->wqh))
+ wake_up_locked(&ctx->wqh);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return n;
+}
+
+static int eventfd_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static unsigned int eventfd_poll(struct file *file, poll_table *wait)
+{
+ struct eventfd_ctx *ctx = file->private_data;
+ unsigned int events = 0;
+ unsigned long flags;
+
+ poll_wait(file, &ctx->wqh, wait);
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ if (ctx->count > 0)
+ events |= POLLIN;
+ if (ctx->count == ULLONG_MAX)
+ events |= POLLERR;
+ if (ULLONG_MAX - 1 > ctx->count)
+ events |= POLLOUT;
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ return events;
+}
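
eventfd_poll() maps the counter state onto poll flags: POLLIN once "count" is non-zero, POLLOUT while at least one more unit still fits, and POLLERR if the in-kernel eventfd_signal() path has pushed the counter all the way to ULLONG_MAX. A small userspace sketch of waiting on that POLLIN edge; the helper name and the omitted error handling are illustrative only:

#include <poll.h>
#include <stdint.h>
#include <unistd.h>

/* Block until the eventfd counter becomes non-zero, then drain it.
 * 'efd' is assumed to be a descriptor returned by the eventfd syscall. */
static uint64_t wait_and_drain(int efd)
{
        struct pollfd pfd = { .fd = efd, .events = POLLIN };
        uint64_t count = 0;

        /* poll() returns once eventfd_poll() reports POLLIN (count > 0)
         * or POLLERR (counter stuck at ULLONG_MAX). */
        if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
                read(efd, &count, sizeof(count));  /* resets count to 0 */

        return count;
}
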
+
+static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct eventfd_ctx *ctx = file->private_data;
+ ssize_t res;
+ __u64 ucnt;
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (count < sizeof(ucnt))
+ return -EINVAL;
+ spin_lock_irq(&ctx->lock);
+ res = -EAGAIN;
+ ucnt = ctx->count;
+ if (ucnt > 0)
+ res = sizeof(ucnt);
+ else if (!(file->f_flags & O_NONBLOCK)) {
+ __add_wait_queue(&ctx->wqh, &wait);
+ for (res = 0;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (ctx->count > 0) {
+ ucnt = ctx->count;
+ res = sizeof(ucnt);
+ break;
+ }
+ if (signal_pending(current)) {
+ res = -ERESTARTSYS;
+ break;
+ }
+ spin_unlock_irq(&ctx->lock);
+ schedule();
+ spin_lock_irq(&ctx->lock);
+ }
+ __remove_wait_queue(&ctx->wqh, &wait);
+ __set_current_state(TASK_RUNNING);
+ }
+ if (res > 0) {
+ ctx->count = 0;
+ if (waitqueue_active(&ctx->wqh))
+ wake_up_locked(&ctx->wqh);
+ }
+ spin_unlock_irq(&ctx->lock);
+ if (res > 0 && put_user(ucnt, (__u64 __user *) buf))
+ return -EFAULT;
+
+ return res;
+}
+
+static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct eventfd_ctx *ctx = file->private_data;
+ ssize_t res;
+ __u64 ucnt;
+ DECLARE_WAITQUEUE(wait, current);
+
+ if (count < sizeof(ucnt))
+ return -EINVAL;
+ if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
+ return -EFAULT;
+ if (ucnt == ULLONG_MAX)
+ return -EINVAL;
+ spin_lock_irq(&ctx->lock);
+ res = -EAGAIN;
+ if (ULLONG_MAX - ctx->count > ucnt)
+ res = sizeof(ucnt);
+ else if (!(file->f_flags & O_NONBLOCK)) {
+ __add_wait_queue(&ctx->wqh, &wait);
+ for (res = 0;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (ULLONG_MAX - ctx->count > ucnt) {
+ res = sizeof(ucnt);
+ break;
+ }
+ if (signal_pending(current)) {
+ res = -ERESTARTSYS;
+ break;
+ }
+ spin_unlock_irq(&ctx->lock);
+ schedule();
+ spin_lock_irq(&ctx->lock);
+ }
+ __remove_wait_queue(&ctx->wqh, &wait);
+ __set_current_state(TASK_RUNNING);
+ }
+ if (res > 0) {
+ ctx->count += ucnt;
+ if (waitqueue_active(&ctx->wqh))
+ wake_up_locked(&ctx->wqh);
+ }
+ spin_unlock_irq(&ctx->lock);
+
+ return res;
+}
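
Both eventfd_read() and eventfd_write() fall back to -EAGAIN instead of sleeping when O_NONBLOCK is set: the read side when the counter is zero, the write side when adding "ucnt" would take the counter to ULLONG_MAX or beyond. A hedged userspace sketch of the non-blocking read case; setting the flag through fcntl(2) is outside this patch:

#include <errno.h>
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* Try to drain the counter without sleeping; returns 0 if it was empty. */
static uint64_t try_drain(int efd)
{
        uint64_t count = 0;

        fcntl(efd, F_SETFL, fcntl(efd, F_GETFL) | O_NONBLOCK);

        /* With the counter at zero, eventfd_read() fails with -EAGAIN. */
        if (read(efd, &count, sizeof(count)) < 0 && errno == EAGAIN)
                return 0;

        return count;
}
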
+
+static const struct file_operations eventfd_fops = {
+ .release = eventfd_release,
+ .poll = eventfd_poll,
+ .read = eventfd_read,
+ .write = eventfd_write,
+};
+
+struct file *eventfd_fget(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+ if (!file)
+ return ERR_PTR(-EBADF);
+ if (file->f_op != &eventfd_fops) {
+ fput(file);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return file;
+}
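
eventfd_fget() is the hook for other kernel code: it resolves a descriptor passed in from userspace, verifies it really is an eventfd, and returns the struct file with a reference held, after which eventfd_signal() can be called from non-sleeping contexts. A sketch of how a driver might glue the two together; struct my_drv and both helpers are hypothetical and not part of this patch:

#include <linux/err.h>
#include <linux/file.h>
#include <linux/eventfd.h>

struct my_drv {
        struct file *notify_filp;       /* hypothetical driver state */
};

/* May sleep: resolve the user-supplied descriptor and keep a reference
 * to the eventfd file for later signalling. */
static int my_drv_setup_notify(struct my_drv *drv, int fd)
{
        struct file *filp = eventfd_fget(fd);

        if (IS_ERR(filp))
                return PTR_ERR(filp);   /* -EBADF, or not an eventfd */
        drv->notify_filp = filp;        /* dropped later with fput() */
        return 0;
}

/* Must not sleep (e.g. a completion/IRQ path): add one to the counter
 * and wake readers; eventfd_signal() clamps the counter at ULLONG_MAX. */
static void my_drv_complete(struct my_drv *drv)
{
        eventfd_signal(drv->notify_filp, 1);
}
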
+
+asmlinkage long sys_eventfd(unsigned int count)
+{
+ int error, fd;
+ struct eventfd_ctx *ctx;
+ struct file *file;
+ struct inode *inode;
+
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ init_waitqueue_head(&ctx->wqh);
+ spin_lock_init(&ctx->lock);
+ ctx->count = count;
+
+ /*
+ * When we call this, the initialization must be complete, since
+ * anon_inode_getfd() will install the fd.
+ */
+ error = anon_inode_getfd(&fd, &inode, &file, "[eventfd]",
+ &eventfd_fops, ctx);
+ if (!error)
+ return fd;
+
+ kfree(ctx);
+ return error;
+}
+
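
sys_eventfd() seeds ctx->count with the caller-supplied value, so a descriptor created with a non-zero count is readable before anything has been written to it. A last minimal sketch under the same __NR_eventfd assumption as above:

#define _GNU_SOURCE
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        /* The initial count of 5 is copied straight into ctx->count. */
        int efd = syscall(__NR_eventfd, 5);
        uint64_t v = 0;

        /* Readable right away: returns 5 and resets the counter to 0. */
        read(efd, &v, sizeof(v));

        close(efd);
        return v == 5 ? 0 : 1;
}
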