Diffstat (limited to 'include')
-rw-r--r--  include/linux/backing-dev.h          11
-rw-r--r--  include/linux/blkdev.h               14
-rw-r--r--  include/linux/crash_dump.h            1
-rw-r--r--  include/linux/elfcore.h               5
-rw-r--r--  include/linux/hardirq.h               2
-rw-r--r--  include/linux/input/matrix_keypad.h  65
-rw-r--r--  include/linux/mnt_namespace.h        13
-rw-r--r--  include/linux/personality.h           5
-rw-r--r--  include/linux/quotaops.h              1
-rw-r--r--  include/linux/rfkill.h                1
-rw-r--r--  include/linux/sched.h                 9
-rw-r--r--  include/linux/spinlock.h              5
-rw-r--r--  include/linux/sunrpc/xdr.h            1
-rw-r--r--  include/linux/syscalls.h              2
-rw-r--r--  include/linux/usb/serial.h            3
-rw-r--r--  include/net/sock.h                   69
16 files changed, 175 insertions(+), 32 deletions(-)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0ec2c594868e..1d52425a6118 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -229,9 +229,14 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
(1 << BDI_async_congested));
}
-void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
-void set_bdi_congested(struct backing_dev_info *bdi, int rw);
-long congestion_wait(int rw, long timeout);
+enum {
+ BLK_RW_ASYNC = 0,
+ BLK_RW_SYNC = 1,
+};
+
+void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
+void set_bdi_congested(struct backing_dev_info *bdi, int sync);
+long congestion_wait(int sync, long timeout);
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
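For illustration, a caller of the renamed interface might look like this; the helper below is a sketch, not part of this patch:

    /*
     * Illustrative only: back off until async (writeback) congestion
     * clears, waiting at most 100ms per cycle.
     */
    static void mydrv_throttle_writeback(void)
    {
            congestion_wait(BLK_RW_ASYNC, HZ / 10);
    }
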
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 49ae07951d55..e7cb5dbf6c26 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -70,11 +70,6 @@ enum rq_cmd_type_bits {
REQ_TYPE_ATA_PC,
};
-enum {
- BLK_RW_ASYNC = 0,
- BLK_RW_SYNC = 1,
-};
-
/*
* For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
* sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -723,6 +718,7 @@ struct rq_map_data {
int nr_entries;
unsigned long offset;
int null_mapped;
+ int from_user;
};
struct req_iterator {
@@ -779,18 +775,18 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
* congested queues, and wake up anyone who was waiting for requests to be
* put back.
*/
-static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
- clear_bdi_congested(&q->backing_dev_info, rw);
+ clear_bdi_congested(&q->backing_dev_info, sync);
}
/*
* A queue has just entered congestion. Flag that in the queue's VM-visible
* state flags and increment the global counter of congested queues.
*/
-static inline void blk_set_queue_congested(struct request_queue *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
- set_bdi_congested(&q->backing_dev_info, rw);
+ set_bdi_congested(&q->backing_dev_info, sync);
}
extern void blk_start_queue(struct request_queue *q);
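A sketch of how a driver might drive these helpers; the thresholds and the nr_pending counter are invented for illustration:

    /*
     * Hypothetical driver fragment: mark the queue congested when the
     * backlog grows, clear the flag once it drains.
     */
    static void mydrv_update_congestion(struct request_queue *q, int nr_pending)
    {
            if (nr_pending > 64)
                    blk_set_queue_congested(q, BLK_RW_SYNC);
            else if (nr_pending < 32)
                    blk_clear_queue_congested(q, BLK_RW_SYNC);
    }
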
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 2dac064d8359..0026f267da20 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -3,7 +3,6 @@
#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
-#include <linux/smp_lock.h>
#include <linux/device.h>
#include <linux/proc_fs.h>
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 03ec16779802..00d6a68d0421 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -122,10 +122,9 @@ static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_r
static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
{
-#ifdef ELF_CORE_COPY_TASK_REGS
-
+#if defined (ELF_CORE_COPY_TASK_REGS)
return ELF_CORE_COPY_TASK_REGS(t, elfregs);
-#else
+#elif defined (task_pt_regs)
elf_core_copy_regs(elfregs, task_pt_regs(t));
#endif
return 0;
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 45257475623c..8246c697863d 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -2,7 +2,9 @@
#define LINUX_HARDIRQ_H
#include <linux/preempt.h>
+#ifdef CONFIG_PREEMPT
#include <linux/smp_lock.h>
+#endif
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <asm/hardirq.h>
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
new file mode 100644
index 000000000000..7964516c6954
--- /dev/null
+++ b/include/linux/input/matrix_keypad.h
@@ -0,0 +1,65 @@
+#ifndef _MATRIX_KEYPAD_H
+#define _MATRIX_KEYPAD_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+
+#define MATRIX_MAX_ROWS 16
+#define MATRIX_MAX_COLS 16
+
+#define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
+ (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
+ ((val) & 0xffff))
+
+#define KEY_ROW(k) (((k) >> 24) & 0xff)
+#define KEY_COL(k) (((k) >> 16) & 0xff)
+#define KEY_VAL(k) ((k) & 0xffff)
+
+/**
+ * struct matrix_keymap_data - keymap for matrix keyboards
+ * @keymap: pointer to array of uint32 values encoded with KEY() macro
+ * representing keymap
+ * @keymap_size: number of initialized entries in this keymap
+ * @max_keymap_size: maximum size of keymap supported by the device
+ *
+ * This structure is supposed to be used by platform code to supply
+ * keymaps to drivers that implement matrix-like keypads/keyboards.
+ */
+struct matrix_keymap_data {
+ const uint32_t *keymap;
+ unsigned int keymap_size;
+ unsigned int max_keymap_size;
+};
+
+/**
+ * struct matrix_keypad_platform_data - platform-dependent keypad data
+ * @keymap_data: pointer to &matrix_keymap_data
+ * @row_gpios: array of gpio numbers representing rows
+ * @col_gpios: array of gpio numbers representing columns
+ * @num_row_gpios: actual number of row gpios used by device
+ * @num_col_gpios: actual number of col gpios used by device
+ * @col_scan_delay_us: delay, measured in microseconds, that is
+ * needed before we can scan the keypad after activating a column gpio
+ * @debounce_ms: debounce interval in milliseconds
+ *
+ * This structure represents platform-specific data that is used by the
+ * matrix_keypad driver to perform proper initialization.
+ */
+struct matrix_keypad_platform_data {
+ const struct matrix_keymap_data *keymap_data;
+
+ unsigned int row_gpios[MATRIX_MAX_ROWS];
+ unsigned int col_gpios[MATRIX_MAX_COLS];
+ unsigned int num_row_gpios;
+ unsigned int num_col_gpios;
+
+ unsigned int col_scan_delay_us;
+
+ /* key debounce interval in milliseconds */
+ unsigned int debounce_ms;
+
+ bool active_low;
+ bool wakeup;
+};
+
+#endif /* _MATRIX_KEYPAD_H */
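To show how platform code would consume this header, here is a hedged sketch of board support data for a 2x2 matrix; the identifiers and GPIO numbers are invented, and the usual board-file includes (linux/kernel.h for ARRAY_SIZE) are assumed:

    static const uint32_t board_keymap[] = {
            KEY(0, 0, KEY_A),
            KEY(0, 1, KEY_B),
            KEY(1, 0, KEY_ENTER),
            KEY(1, 1, KEY_ESC),
    };

    static const struct matrix_keymap_data board_keymap_data = {
            .keymap         = board_keymap,
            .keymap_size    = ARRAY_SIZE(board_keymap),
    };

    static struct matrix_keypad_platform_data board_keypad_data = {
            .keymap_data       = &board_keymap_data,
            .row_gpios         = { 10, 11 },
            .col_gpios         = { 20, 21 },
            .num_row_gpios     = 2,
            .num_col_gpios     = 2,
            .col_scan_delay_us = 10,
            .debounce_ms       = 20,
            .active_low        = true,
    };
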
diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h
index 3beb2592b03f..d74785c2393a 100644
--- a/include/linux/mnt_namespace.h
+++ b/include/linux/mnt_namespace.h
@@ -2,10 +2,9 @@
#define _NAMESPACE_H_
#ifdef __KERNEL__
-#include <linux/mount.h>
-#include <linux/sched.h>
-#include <linux/nsproxy.h>
+#include <linux/path.h>
#include <linux/seq_file.h>
+#include <linux/wait.h>
struct mnt_namespace {
atomic_t count;
@@ -28,14 +27,6 @@ extern struct mnt_namespace *create_mnt_ns(struct vfsmount *mnt);
extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *,
struct fs_struct *);
extern void put_mnt_ns(struct mnt_namespace *ns);
-
-static inline void exit_mnt_ns(struct task_struct *p)
-{
- struct mnt_namespace *ns = p->nsproxy->mnt_ns;
- if (ns)
- put_mnt_ns(ns);
-}
-
static inline void get_mnt_ns(struct mnt_namespace *ns)
{
atomic_inc(&ns->count);
diff --git a/include/linux/personality.h b/include/linux/personality.h
index a84e9ff9b27e..126120819a0d 100644
--- a/include/linux/personality.h
+++ b/include/linux/personality.h
@@ -40,7 +40,10 @@ enum {
* Security-relevant compatibility flags that must be
* cleared upon setuid or setgid exec:
*/
-#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC|ADDR_NO_RANDOMIZE)
+#define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
+ ADDR_NO_RANDOMIZE | \
+ ADDR_COMPAT_LAYOUT | \
+ MMAP_PAGE_ZERO)
/*
* Personality types.
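The effect of the wider mask, roughly sketched; the real exec path applies it through the bprm security hooks rather than doing this directly:

    /* Simplified: on a set[ug]id exec the kernel effectively does */
    current->personality &= ~PER_CLEAR_ON_SETID;
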
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 7bc457593684..26361c4c037a 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -7,7 +7,6 @@
#ifndef _LINUX_QUOTAOPS_
#define _LINUX_QUOTAOPS_
-#include <linux/smp_lock.h>
#include <linux/fs.h>
static inline struct quota_info *sb_dqopt(struct super_block *sb)
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index e73e2429a1b1..2ce29831feb6 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -99,7 +99,6 @@ enum rfkill_user_states {
#undef RFKILL_STATE_UNBLOCKED
#undef RFKILL_STATE_HARD_BLOCKED
-#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0085d758d645..16a982e389fb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -498,6 +498,15 @@ struct task_cputime {
.sum_exec_runtime = 0, \
}
+/*
+ * Disable preemption until the scheduler is running.
+ * Reset by start_kernel()->sched_init()->init_idle().
+ *
+ * We include PREEMPT_ACTIVE to prevent cond_resched() from working
+ * before the scheduler is active -- see should_resched().
+ */
+#define INIT_PREEMPT_COUNT (1 + PREEMPT_ACTIVE)
+
/**
* struct thread_group_cputimer - thread group interval timer counts
* @cputime: thread group interval timers.
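For reference, the check the comment points at looks like this (sketched from the scheduler of this era; treat it as illustrative rather than authoritative):

    static int should_resched(void)
    {
            /* PREEMPT_ACTIVE in the count keeps this false until
             * init_idle() resets the preempt count. */
            return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
    }
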
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 252b245cfcf4..4be57ab03478 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -132,6 +132,11 @@ do { \
#endif /*__raw_spin_is_contended*/
#endif
+/* The lock does not imply a full memory barrier. */
+#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
+static inline void smp_mb__after_lock(void) { smp_mb(); }
+#endif
+
/**
* spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index d8910b68e1bd..b99c625fddfe 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -12,7 +12,6 @@
#include <linux/uio.h>
#include <asm/byteorder.h>
#include <linux/scatterlist.h>
-#include <linux/smp_lock.h>
/*
* Buffer adjustment
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index fa4242cdade8..80de7003d8c2 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -321,6 +321,8 @@ asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
siginfo_t __user *uinfo,
const struct timespec __user *uts,
size_t sigsetsize);
+asmlinkage long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
+ siginfo_t __user *uinfo);
asmlinkage long sys_kill(int pid, int sig);
asmlinkage long sys_tgkill(int tgid, int pid, int sig);
asmlinkage long sys_tkill(int pid, int sig);
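Since glibc provides no wrapper for this call, userspace would invoke it through syscall(2); a hypothetical sketch (the wrapper name is invented):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    static long rt_tgsigqueueinfo(pid_t tgid, pid_t tid, int sig,
                                  siginfo_t *info)
    {
            return syscall(__NR_rt_tgsigqueueinfo, tgid, tid, sig, info);
    }
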
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 44801d26a37a..0ec50ba62139 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -317,7 +317,8 @@ extern int usb_serial_generic_register(int debug);
extern void usb_serial_generic_deregister(void);
extern void usb_serial_generic_resubmit_read_urb(struct usb_serial_port *port,
gfp_t mem_flags);
-extern int usb_serial_handle_sysrq_char(struct usb_serial_port *port,
+extern int usb_serial_handle_sysrq_char(struct tty_struct *tty,
+ struct usb_serial_port *port,
unsigned int ch);
extern int usb_serial_handle_break(struct usb_serial_port *port);
diff --git a/include/net/sock.h b/include/net/sock.h
index 352f06bbd7a9..2c0da9239b95 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -54,6 +54,7 @@
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
+#include <linux/poll.h>
#include <asm/atomic.h>
#include <net/dst.h>
@@ -1241,6 +1242,74 @@ static inline int sk_has_allocations(const struct sock *sk)
return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}
+/**
+ * sk_has_sleeper - check if there are any waiting processes
+ * @sk: socket
+ *
+ * Returns true if the socket has waiting processes.
+ *
+ * The purpose of sk_has_sleeper and sock_poll_wait is to wrap the memory
+ * barrier call. They were added because of a race found within the tcp code.
+ *
+ * Consider the following tcp code paths:
+ *
+ * CPU1                          CPU2
+ *
+ * sys_select                    receive packet
+ *   ...                         ...
+ *   __add_wait_queue            update tp->rcv_nxt
+ *   ...                         ...
+ *   tp->rcv_nxt check           sock_def_readable
+ *   ...                         {
+ *   schedule                      ...
+ *                                 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ *                                     wake_up_interruptible(sk->sk_sleep)
+ *                                 ...
+ *                               }
+ *
+ * The race fires when the __add_wait_queue changes done by CPU1 stay in its
+ * cache, and so does the tp->rcv_nxt update on the CPU2 side. CPU1 could then
+ * end up calling schedule and sleeping forever if there is no more data on
+ * the socket.
+ *
+ * sk_has_sleeper is always called right after a call to read_lock, so we
+ * can use the smp_mb__after_lock barrier.
+ */
+static inline int sk_has_sleeper(struct sock *sk)
+{
+ /*
+ * We need to be sure we are in sync with the
+ * add_wait_queue modifications to the wait queue.
+ *
+ * This memory barrier is paired in the sock_poll_wait.
+ */
+ smp_mb__after_lock();
+ return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
+}
+
+/**
+ * sock_poll_wait - place a memory barrier behind the poll_wait call.
+ * @filp: file
+ * @wait_address: socket wait queue
+ * @p: poll_table
+ *
+ * See the comments in the sk_has_sleeper function.
+ */
+static inline void sock_poll_wait(struct file *filp,
+ wait_queue_head_t *wait_address, poll_table *p)
+{
+ if (p && wait_address) {
+ poll_wait(filp, wait_address, p);
+ /*
+ * We need to be sure we are in sync with the
+ * socket flags modification.
+ *
+ * This memory barrier is paired in the sk_has_sleeper.
+ */
+ smp_mb();
+ }
+}
+
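The intended pairing, sketched in simplified form; the two bodies below are illustrative, condensed from how the net core uses these helpers, not part of this patch:

    /* Wakeup side: test for sleepers under the callback lock. */
    static void example_def_readable(struct sock *sk)
    {
            read_lock(&sk->sk_callback_lock);
            if (sk_has_sleeper(sk))
                    wake_up_interruptible(sk->sk_sleep);
            read_unlock(&sk->sk_callback_lock);
    }

    /* Poll side: register the waiter, then re-check the socket state. */
    static unsigned int example_poll(struct file *file, struct socket *sock,
                                     poll_table *wait)
    {
            struct sock *sk = sock->sk;

            sock_poll_wait(file, sk->sk_sleep, wait);
            return skb_queue_empty(&sk->sk_receive_queue) ?
                    0 : (POLLIN | POLLRDNORM);
    }
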
/*
* Queue a received datagram if it will fit. Stream and sequenced
* protocols can't normally use this as they need to fit buffers in