author     Alex Shi <alex.shi@linaro.org>    2016-06-14 17:06:48 +0800
committer  Alex Shi <alex.shi@linaro.org>    2016-06-14 17:06:48 +0800
commit     1beb6ea36b283c1dc34812c6ef9bf56a1e494f8c (patch)
tree       539dc6f4b8c68f385a6457ef7b9d15570a0a8dcc
parent     6c1be3bf831c5b1f2ba51425315287df2c350633 (diff)
parent     b5076139991c6b12c62346d9880eec1d4227d99f (diff)
Merge tag 'v3.18.35' into linux-linaro-lsk-v3.18
This is the 3.18.35 stable release
-rw-r--r--  Documentation/devicetree/bindings/crypto/samsung-sss.txt | 6
-rw-r--r--  Makefile | 9
-rw-r--r--  arch/arm/kvm/mmu.c | 17
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 1
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 5
-rw-r--r--  arch/mips/ath79/early_printk.c | 6
-rw-r--r--  arch/mips/include/asm/kvm_host.h | 2
-rw-r--r--  arch/mips/include/asm/msa.h | 13
-rw-r--r--  arch/mips/include/uapi/asm/siginfo.h | 18
-rw-r--r--  arch/mips/kernel/process.c | 2
-rw-r--r--  arch/mips/kernel/traps.c | 8
-rw-r--r--  arch/mips/kvm/emulate.c | 89
-rw-r--r--  arch/mips/kvm/trap_emul.c | 2
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 8
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 1
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 26
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 16
-rw-r--r--  arch/sparc/include/asm/pgtable_64.h | 7
-rw-r--r--  arch/x86/include/asm/acpi.h | 1
-rw-r--r--  arch/x86/include/asm/pgtable.h | 5
-rw-r--r--  arch/x86/pci/xen.c | 9
-rw-r--r--  drivers/acpi/osl.c | 16
-rw-r--r--  drivers/base/power/main.c | 5
-rw-r--r--  drivers/base/power/runtime.c | 9
-rw-r--r--  drivers/bluetooth/hci_vhci.c | 28
-rw-r--r--  drivers/cpuidle/cpuidle.c | 4
-rw-r--r--  drivers/crypto/caam/jr.c | 2
-rw-r--r--  drivers/crypto/s5p-sss.c | 85
-rw-r--r--  drivers/dma/imx-sdma.c | 7
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 5
-rw-r--r--  drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 2
-rw-r--r--  drivers/input/misc/uinput.c | 6
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 7
-rw-r--r--  drivers/irqchip/irq-gic.c | 8
-rw-r--r--  drivers/mcb/mcb-parse.c | 2
-rw-r--r--  drivers/mfd/omap-usb-tll.c | 13
-rw-r--r--  drivers/mmc/card/block.c | 5
-rw-r--r--  drivers/mmc/core/core.c | 4
-rw-r--r--  drivers/mmc/core/mmc.c | 7
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 7
-rw-r--r--  drivers/mtd/ubi/eba.c | 23
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 1
-rw-r--r--  drivers/mtd/ubi/ubi.h | 6
-rw-r--r--  drivers/mtd/ubi/wl.c | 8
-rw-r--r--  drivers/net/can/dev.c | 56
-rw-r--r--  drivers/net/can/m_can/m_can.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/led.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 10
-rw-r--r--  drivers/net/wireless/rtlwifi/base.c | 4
-rw-r--r--  drivers/net/wireless/rtlwifi/pci.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8723be/sw.c | 8
-rw-r--r--  drivers/pci/probe.c | 6
-rw-r--r--  drivers/pinctrl/samsung/pinctrl-exynos5440.c | 15
-rw-r--r--  drivers/regulator/core.c | 106
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 12
-rw-r--r--  drivers/thunderbolt/eeprom.c | 1
-rw-r--r--  drivers/tty/n_gsm.c | 4
-rw-r--r--  drivers/tty/serial/ucc_uart.c | 3
-rw-r--r--  drivers/usb/core/driver.c | 40
-rw-r--r--  drivers/usb/core/hcd.c | 15
-rw-r--r--  drivers/usb/core/hub.c | 8
-rw-r--r--  drivers/usb/misc/usbtest.c | 37
-rw-r--r--  drivers/usb/serial/cp210x.c | 45
-rw-r--r--  drivers/usb/serial/io_edgeport.c | 56
-rw-r--r--  drivers/usb/serial/keyspan.c | 4
-rw-r--r--  drivers/usb/serial/mxuport.c | 10
-rw-r--r--  drivers/usb/serial/option.c | 155
-rw-r--r--  drivers/usb/serial/quatech2.c | 1
-rw-r--r--  drivers/xen/events/events_base.c | 6
-rw-r--r--  fs/btrfs/ctree.h | 1
-rw-r--r--  fs/btrfs/file.c | 2
-rw-r--r--  fs/btrfs/inode.c | 2
-rw-r--r--  fs/btrfs/ioctl.c | 21
-rw-r--r--  fs/cifs/cifs_spnego.c | 67
-rw-r--r--  fs/cifs/cifsfs.c | 4
-rw-r--r--  fs/cifs/cifsproto.h | 2
-rw-r--r--  fs/cifs/sess.c | 139
-rw-r--r--  fs/cifs/smb2glob.h | 1
-rw-r--r--  fs/cifs/smb2inode.c | 8
-rw-r--r--  fs/cifs/smb2pdu.c | 16
-rw-r--r--  fs/cifs/smb2proto.h | 2
-rw-r--r--  fs/ext4/ialloc.c | 56
-rw-r--r--  fs/ext4/mballoc.c | 10
-rw-r--r--  fs/ext4/namei.c | 2
-rw-r--r--  fs/proc/task_mmu.c | 104
-rw-r--r--  fs/xfs/xfs_inode.c | 26
-rw-r--r--  fs/xfs/xfs_super.c | 16
-rw-r--r--  include/linux/can/dev.h | 22
-rw-r--r--  include/linux/usb.h | 5
-rw-r--r--  include/linux/usb/hcd.h | 1
-rw-r--r--  kernel/exit.c | 29
-rw-r--r--  kernel/sched/proc.c | 11
-rw-r--r--  kernel/trace/ring_buffer.c | 83
-rw-r--r--  lib/dma-debug.c | 2
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 4
-rw-r--r--  scripts/Makefile.extrawarn | 1
-rw-r--r--  sound/soc/codecs/ak4642.c | 36
98 files changed, 1259 insertions, 535 deletions
diff --git a/Documentation/devicetree/bindings/crypto/samsung-sss.txt b/Documentation/devicetree/bindings/crypto/samsung-sss.txt
index a6dafa83c6df..7a5ca56683cc 100644
--- a/Documentation/devicetree/bindings/crypto/samsung-sss.txt
+++ b/Documentation/devicetree/bindings/crypto/samsung-sss.txt
@@ -23,10 +23,8 @@ Required properties:
- "samsung,exynos4210-secss" for Exynos4210, Exynos4212, Exynos4412, Exynos5250,
Exynos5260 and Exynos5420 SoCs.
- reg : Offset and length of the register set for the module
-- interrupts : interrupt specifiers of SSS module interrupts, should contain
- following entries:
- - first : feed control interrupt (required for all variants),
- - second : hash interrupt (required only for samsung,s5pv210-secss).
+- interrupts : interrupt specifiers of SSS module interrupts (one feed
+ control interrupt).
- clocks : list of clock phandle and specifier pairs for all clocks listed in
clock-names property.
diff --git a/Makefile b/Makefile
index 9b50cb12f5bc..49bf82d53fd0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 18
-SUBLEVEL = 34
+SUBLEVEL = 35
EXTRAVERSION =
NAME = Diseased Newt
@@ -376,7 +376,7 @@ AFLAGS_MODULE =
LDFLAGS_MODULE =
CFLAGS_KERNEL =
AFLAGS_KERNEL =
-CFLAGS_GCOV = -fprofile-arcs -ftest-coverage
+CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im
# Use USERINCLUDE when you must reference the UAPI directories only.
@@ -686,9 +686,10 @@ KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
else
-# This warning generated too much noise in a regular build.
-# Use make W=1 to enable this warning (see scripts/Makefile.build)
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.build)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
endif
ifdef CONFIG_FRAME_POINTER
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 3535480e0e6b..0310b03697e0 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -841,11 +841,14 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
old_pmd = *pmd;
- kvm_set_pmd(pmd, *new_pmd);
- if (pmd_present(old_pmd))
+ if (pmd_present(old_pmd)) {
+ pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
- else
+ } else {
get_page(virt_to_page(pmd));
+ }
+
+ kvm_set_pmd(pmd, *new_pmd);
return 0;
}
@@ -882,12 +885,14 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
/* Create 2nd stage page table mapping - Level 3 */
old_pte = *pte;
- kvm_set_pte(pte, *new_pte);
- if (pte_present(old_pte))
+ if (pte_present(old_pte)) {
+ kvm_set_pte(pte, __pte(0));
kvm_tlb_flush_vmid_ipa(kvm, addr);
- else
+ } else {
get_page(virt_to_page(pte));
+ }
+ kvm_set_pte(pte, *new_pte);
return 0;
}
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 88174e0bfafe..31e6b0477e60 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -77,7 +77,6 @@
* Section
*/
#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
-#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f7ec715ba21e..239192d72a7b 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -288,6 +288,8 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#define pmd_present(pmd) pte_present(pmd_pte(pmd))
+#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
@@ -295,7 +297,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
+#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))
#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
@@ -335,7 +337,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_present(pmd) (pmd_val(pmd))
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index b955fafc58ba..d1adc59af5bf 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -31,13 +31,15 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
} while (1);
}
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
static void prom_putchar_ar71xx(unsigned char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
- prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
+ prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
__raw_writel(ch, base + UART_TX * 4);
- prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
+ prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
}
static void prom_putchar_ar933x(unsigned char ch)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 4e3205a3bee2..1616b56eadfe 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -717,7 +717,7 @@ extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index af5638b12c75..38bbeda8644c 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -67,6 +67,19 @@ static inline void restore_msa(struct task_struct *t)
_restore_msa(t);
}
+static inline void init_msa_upper(void)
+{
+ /*
+ * Check cpu_has_msa only if it's a constant. This will allow the
+ * compiler to optimise out code for CPUs without MSA without adding
+ * an extra redundant check for CPUs with MSA.
+ */
+ if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
+ return;
+
+ _init_msa_upper();
+}
+
#ifdef TOOLCHAIN_SUPPORTS_MSA
#define __BUILD_MSA_CTL_REG(name, cs) \
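
[Editor's note] The init_msa_upper() comment above describes a common kernel pattern: guard a call with __builtin_constant_p() so that configurations where the feature flag folds to a compile-time constant 0 drop the call entirely, while configurations where the flag is only known at run time pay no extra branch. A stand-alone GCC/Clang sketch of the same idea, with invented names (HAVE_FEATURE and expensive_init() are placeholders, not kernel symbols):

#include <stdio.h>

#define HAVE_FEATURE 0          /* pretend this comes from Kconfig */

static void expensive_init(void)
{
	puts("feature state initialised");
}

static inline void maybe_init(void)
{
	/*
	 * When HAVE_FEATURE is a compile-time constant 0, the early return
	 * lets the compiler eliminate both the check and the call.  If the
	 * value were only known at run time, __builtin_constant_p() would
	 * evaluate to 0 and the whole condition would fold away, so no
	 * extra run-time test is added either.
	 */
	if (__builtin_constant_p(HAVE_FEATURE) && !HAVE_FEATURE)
		return;

	expensive_init();
}

int main(void)
{
	maybe_init();           /* prints nothing with HAVE_FEATURE == 0 */
	return 0;
}
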
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index e81174432bab..6e1218ae916c 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -48,13 +48,13 @@ typedef struct siginfo {
/* kill() */
struct {
- pid_t _pid; /* sender's pid */
+ __kernel_pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
- timer_t _tid; /* timer id */
+ __kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
@@ -63,26 +63,26 @@ typedef struct siginfo {
/* POSIX.1b signals */
struct {
- pid_t _pid; /* sender's pid */
+ __kernel_pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
- pid_t _pid; /* which child */
+ __kernel_pid_t _pid; /* which child */
__ARCH_SI_UID_T _uid; /* sender's uid */
int _status; /* exit code */
- clock_t _utime;
- clock_t _stime;
+ __kernel_clock_t _utime;
+ __kernel_clock_t _stime;
} _sigchld;
/* IRIX SIGCHLD */
struct {
- pid_t _pid; /* which child */
- clock_t _utime;
+ __kernel_pid_t _pid; /* which child */
+ __kernel_clock_t _utime;
int _status; /* exit code */
- clock_t _stime;
+ __kernel_clock_t _stime;
} _irix_sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 636b0745d7c7..7d09efd25b56 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -437,7 +437,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
*sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
regs = (struct pt_regs *)*sp;
pc = regs->cp0_epc;
- if (__kernel_text_address(pc)) {
+ if (!user_mode(regs) && __kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 2012a5a3055b..aaa64429ea4f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -141,7 +141,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
if (!task)
task = current;
- if (raw_show_trace || !__kernel_text_address(pc)) {
+ if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
show_raw_backtrace(sp);
return;
}
@@ -1103,7 +1103,7 @@ static int enable_restore_fp_context(int msa)
err = init_fpu();
if (msa && !err) {
enable_msa();
- _init_msa_upper();
+ init_msa_upper();
set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE);
}
@@ -1166,7 +1166,7 @@ static int enable_restore_fp_context(int msa)
*/
prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
if (!prior_msa && was_fpu_owner) {
- _init_msa_upper();
+ init_msa_upper();
goto out;
}
@@ -1183,7 +1183,7 @@ static int enable_restore_fp_context(int msa)
* of each vector register such that it cannot see data left
* behind by another task.
*/
- _init_msa_upper();
+ init_msa_upper();
} else {
/* We need to restore the vector context. */
restore_msa(current);
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 838d3a6a5b7d..824be0da92cd 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -302,12 +302,31 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
*/
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
- ktime_t expires;
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ ktime_t expires, threshold;
+ uint32_t count, compare;
int running;
- /* Is the hrtimer pending? */
+ /* Calculate the biased and scaled guest CP0_Count */
+ count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+ compare = kvm_read_c0_guest_compare(cop0);
+
+ /*
+ * Find whether CP0_Count has reached the closest timer interrupt. If
+ * not, we shouldn't inject it.
+ */
+ if ((int32_t)(count - compare) < 0)
+ return count;
+
+ /*
+ * The CP0_Count we're going to return has already reached the closest
+ * timer interrupt. Quickly check if it really is a new interrupt by
+ * looking at whether the interval until the hrtimer expiry time is
+ * less than 1/4 of the timer period.
+ */
expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
- if (ktime_compare(now, expires) >= 0) {
+ threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
+ if (ktime_before(expires, threshold)) {
/*
* Cancel it while we handle it so there's no chance of
* interference with the timeout handler.
@@ -329,8 +348,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
}
}
- /* Return the biased and scaled guest CP0_Count */
- return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+ return count;
}
/**
@@ -420,32 +438,6 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
}
/**
- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
- * @vcpu: Virtual CPU.
- *
- * Recalculates and updates the expiry time of the hrtimer. This can be used
- * after timer parameters have been altered which do not depend on the time that
- * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
- * kvm_mips_resume_hrtimer() are used directly).
- *
- * It is guaranteed that no timer interrupts will be lost in the process.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
-{
- ktime_t now;
- uint32_t count;
-
- /*
- * freeze_hrtimer takes care of a timer interrupts <= count, and
- * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
- */
- now = kvm_mips_freeze_hrtimer(vcpu, &count);
- kvm_mips_resume_hrtimer(vcpu, now, count);
-}
-
-/**
* kvm_mips_write_count() - Modify the count and update timer.
* @vcpu: Virtual CPU.
* @count: Guest CP0_Count value to set.
@@ -540,23 +532,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
* kvm_mips_write_compare() - Modify compare and update timer.
* @vcpu: Virtual CPU.
* @compare: New CP0_Compare value.
+ * @ack: Whether to acknowledge timer interrupt.
*
* Update CP0_Compare to a new value and update the timeout.
+ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
+ * any pending timer interrupt is preserved.
*/
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
+ int dc;
+ u32 old_compare = kvm_read_c0_guest_compare(cop0);
+ ktime_t now;
+ uint32_t count;
/* if unchanged, must just be an ack */
- if (kvm_read_c0_guest_compare(cop0) == compare)
+ if (old_compare == compare) {
+ if (!ack)
+ return;
+ kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ kvm_write_c0_guest_compare(cop0, compare);
return;
+ }
+
+ /* freeze_hrtimer() takes care of timer interrupts <= count */
+ dc = kvm_mips_count_disabled(vcpu);
+ if (!dc)
+ now = kvm_mips_freeze_hrtimer(vcpu, &count);
+
+ if (ack)
+ kvm_mips_callbacks->dequeue_timer_int(vcpu);
- /* Update compare */
kvm_write_c0_guest_compare(cop0, compare);
- /* Update timeout if count enabled */
- if (!kvm_mips_count_disabled(vcpu))
- kvm_mips_update_hrtimer(vcpu);
+ /* resume_hrtimer() takes care of timer interrupts > count */
+ if (!dc)
+ kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
@@ -1017,9 +1028,9 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
/* If we are writing to COMPARE */
/* Clear pending timer interrupt, if any */
- kvm_mips_callbacks->dequeue_timer_int(vcpu);
kvm_mips_write_compare(vcpu,
- vcpu->arch.gprs[rt]);
+ vcpu->arch.gprs[rt],
+ true);
} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
kvm_write_c0_guest_status(cop0,
vcpu->arch.gprs[rt]);
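
[Editor's note] One detail worth spelling out from the kvm_mips_read_count_running() change earlier in this file: the (int32_t)(count - compare) test keeps working when the 32-bit counter wraps, because the subtraction is performed modulo 2^32 and then read as a signed distance. A small stand-alone C illustration with made-up values, assuming the usual two's-complement conversion (as the kernel does):

#include <stdint.h>
#include <stdio.h>

/*
 * Non-zero when the free-running counter has reached the compare value,
 * even across a 32-bit wrap; mirrors the signed-difference test used for
 * the guest timer above (which checks the "not yet reached" direction).
 */
static int count_reached_compare(uint32_t count, uint32_t compare)
{
	return (int32_t)(count - compare) >= 0;
}

int main(void)
{
	/* Counter just wrapped past a compare value set near the top of the range. */
	printf("%d\n", count_reached_compare(0x00000002u, 0xfffffffeu)); /* 1: reached */
	printf("%d\n", count_reached_compare(0xfffffff0u, 0xfffffffeu)); /* 0: not yet */
	return 0;
}
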
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 4372cc86650c..9bf7b2b83956 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -449,7 +449,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
kvm_mips_write_count(vcpu, v);
break;
case KVM_REG_MIPS_CP0_COMPARE:
- kvm_mips_write_compare(vcpu, v);
+ kvm_mips_write_compare(vcpu, v, false);
break;
case KVM_REG_MIPS_CP0_CAUSE:
/*
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index cac529a405b8..22a2e15bd91b 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -443,9 +443,11 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
case spec_op:
switch (insn.r_format.func) {
case jalr_op:
- regs->regs[insn.r_format.rd] =
- regs->cp0_epc + dec_insn.pc_inc +
- dec_insn.next_pc_inc;
+ if (insn.r_format.rd != 0) {
+ regs->regs[insn.r_format.rd] =
+ regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ }
/* Fall through */
case jr_op:
*contpc = regs->regs[insn.r_format.rs];
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index daf4add50743..59830c87d6b6 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -479,6 +479,7 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
}
#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
+#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd) pte_young(pmd_pte(pmd))
#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 6535936bdf27..2fa2a44259c8 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -188,6 +188,16 @@ static void *eeh_dev_save_state(void *data, void *userdata)
if (!edev)
return NULL;
+ /*
+ * We cannot access the config space on some adapters.
+ * Otherwise, it will cause fenced PHB. We don't save
+ * the content in their config space and will restore
+ * from the initial config space saved when the EEH
+ * device is created.
+ */
+ if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
+ return NULL;
+
pdev = eeh_dev_to_pci_dev(edev);
if (!pdev)
return NULL;
@@ -327,6 +337,19 @@ static void *eeh_dev_restore_state(void *data, void *userdata)
if (!edev)
return NULL;
+ /*
+ * The content in the config space isn't saved because
+ * the blocked config space on some adapters. We have
+ * to restore the initial saved config space when the
+ * EEH device is created.
+ */
+ if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
+ if (list_is_last(&edev->list, &edev->pe->edevs))
+ eeh_pe_restore_bars(edev->pe);
+
+ return NULL;
+ }
+
pdev = eeh_dev_to_pci_dev(edev);
if (!pdev)
return NULL;
@@ -524,9 +547,6 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
/* Save states */
eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);
- /* Report error */
- eeh_pe_dev_traverse(pe, eeh_report_error, &result);
-
/* Issue reset */
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
ret = eeh_reset_pe(pe);
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 5e0198425194..f7487ea09d15 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -938,11 +938,6 @@ hv_facility_unavailable_relon_trampoline:
#endif
STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
- /* Other future vectors */
- .align 7
- .globl __end_interrupts
-__end_interrupts:
-
.align 7
system_call_entry_direct:
#if defined(CONFIG_RELOCATABLE)
@@ -1236,6 +1231,17 @@ __end_handlers:
STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
+ /*
+ * The __end_interrupts marker must be past the out-of-line (OOL)
+ * handlers, so that they are copied to real address 0x100 when running
+ * a relocatable kernel. This ensures they can be reached from the short
+ * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
+ * directly, without using LOAD_HANDLER().
+ */
+ .align 7
+ .globl __end_interrupts
+__end_interrupts:
+
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
* Data area reserved for FWNMI option.
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index bfeb626085ac..1ff9e7864168 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -667,6 +667,13 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline unsigned long pmd_dirty(pmd_t pmd)
+{
+ pte_t pte = __pte(pmd_val(pmd));
+
+ return pte_dirty(pte);
+}
+
static inline unsigned long pmd_young(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 0ab4f9fd2687..3a45668f6dc3 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -50,6 +50,7 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
int trigger, int polarity);
+extern void (*__acpi_unregister_gsi)(u32 gsi);
static inline void disable_acpi(void)
{
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index aa97a070f09f..081d6f45e006 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -99,6 +99,11 @@ static inline int pte_young(pte_t pte)
return pte_flags(pte) & _PAGE_ACCESSED;
}
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return pmd_flags(pmd) & _PAGE_DIRTY;
+}
+
static inline int pmd_young(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_ACCESSED;
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 4f6844b9657d..df8101fa5bd6 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -434,6 +434,7 @@ int __init pci_xen_hvm_init(void)
* just how GSIs get registered.
*/
__acpi_register_gsi = acpi_register_gsi_xen_hvm;
+ __acpi_unregister_gsi = NULL;
#endif
#ifdef CONFIG_PCI_MSI
@@ -455,8 +456,12 @@ int __init pci_xen_initial_domain(void)
pci_msi_ignore_mask = 1;
#endif
__acpi_register_gsi = acpi_register_gsi_xen;
- /* Pre-allocate legacy irqs */
- for (irq = 0; irq < nr_legacy_irqs(); irq++) {
+ __acpi_unregister_gsi = NULL;
+ /*
+ * Pre-allocate the legacy IRQs. Use NR_LEGACY_IRQS here
+ * because we don't have a PIC and thus nr_legacy_irqs() is zero.
+ */
+ for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
int trigger, polarity;
if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 838359818228..d23c2006f07c 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -138,7 +138,7 @@ static struct osi_linux {
unsigned int enable:1;
unsigned int dmi:1;
unsigned int cmdline:1;
- unsigned int default_disabling:1;
+ u8 default_disabling;
} osi_linux = {0, 0, 0, 0};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
@@ -1443,10 +1443,13 @@ void __init acpi_osi_setup(char *str)
if (*str == '!') {
str++;
if (*str == '\0') {
- osi_linux.default_disabling = 1;
+ /* Do not override acpi_osi=!* */
+ if (!osi_linux.default_disabling)
+ osi_linux.default_disabling =
+ ACPI_DISABLE_ALL_VENDOR_STRINGS;
return;
} else if (*str == '*') {
- acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
+ osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
osi = &osi_setup_entries[i];
osi->enable = false;
@@ -1519,10 +1522,13 @@ static void __init acpi_osi_setup_late(void)
acpi_status status;
if (osi_linux.default_disabling) {
- status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
+ status = acpi_update_interfaces(osi_linux.default_disabling);
if (ACPI_SUCCESS(status))
- printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
+ printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
+ osi_linux.default_disabling ==
+ ACPI_DISABLE_ALL_STRINGS ?
+ " and feature groups" : "");
}
for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9717d5f20139..508a8f67c028 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1251,14 +1251,15 @@ int dpm_suspend_late(pm_message_t state)
error = device_suspend_late(dev);
mutex_lock(&dpm_list_mtx);
+ if (!list_empty(&dev->power.entry))
+ list_move(&dev->power.entry, &dpm_late_early_list);
+
if (error) {
pm_dev_err(dev, state, " late", error);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
}
- if (!list_empty(&dev->power.entry))
- list_move(&dev->power.entry, &dpm_late_early_list);
put_device(dev);
if (async_error)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 67c7938e430b..f6f1f90e9448 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1474,11 +1474,16 @@ int pm_runtime_force_resume(struct device *dev)
goto out;
}
- ret = callback(dev);
+ ret = pm_runtime_set_active(dev);
if (ret)
goto out;
- pm_runtime_set_active(dev);
+ ret = callback(dev);
+ if (ret) {
+ pm_runtime_set_suspended(dev);
+ goto out;
+ }
+
pm_runtime_mark_last_busy(dev);
out:
pm_runtime_enable(dev);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 6653473f2757..eaa646dfa783 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -50,6 +50,7 @@ struct vhci_data {
wait_queue_head_t read_wait;
struct sk_buff_head readq;
+ struct mutex open_mutex;
struct delayed_work open_timeout;
};
@@ -95,12 +96,15 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
-static int vhci_create_device(struct vhci_data *data, __u8 opcode)
+static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
struct sk_buff *skb;
__u8 dev_type;
+ if (data->hdev)
+ return -EBADFD;
+
/* bits 0-1 are dev_type (BR/EDR or AMP) */
dev_type = opcode & 0x03;
@@ -159,6 +163,17 @@ static int vhci_create_device(struct vhci_data *data, __u8 opcode)
return 0;
}
+static int vhci_create_device(struct vhci_data *data, __u8 opcode)
+{
+ int err;
+
+ mutex_lock(&data->open_mutex);
+ err = __vhci_create_device(data, opcode);
+ mutex_unlock(&data->open_mutex);
+
+ return err;
+}
+
static inline ssize_t vhci_get_user(struct vhci_data *data,
struct iov_iter *from)
{
@@ -197,11 +212,6 @@ static inline ssize_t vhci_get_user(struct vhci_data *data,
break;
case HCI_VENDOR_PKT:
- if (data->hdev) {
- kfree_skb(skb);
- return -EBADFD;
- }
-
cancel_delayed_work_sync(&data->open_timeout);
opcode = *((__u8 *) skb->data);
@@ -328,6 +338,7 @@ static int vhci_open(struct inode *inode, struct file *file)
skb_queue_head_init(&data->readq);
init_waitqueue_head(&data->read_wait);
+ mutex_init(&data->open_mutex);
INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout);
file->private_data = data;
@@ -341,15 +352,18 @@ static int vhci_open(struct inode *inode, struct file *file)
static int vhci_release(struct inode *inode, struct file *file)
{
struct vhci_data *data = file->private_data;
- struct hci_dev *hdev = data->hdev;
+ struct hci_dev *hdev;
cancel_delayed_work_sync(&data->open_timeout);
+ hdev = data->hdev;
+
if (hdev) {
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
+ skb_queue_purge(&data->readq);
file->private_data = NULL;
kfree(data);
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 9ab99642ca7a..59dc6db6be15 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -127,7 +127,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_end = ktime_get();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
- if (!cpuidle_state_is_coupled(dev, drv, entered_state))
+ if (!cpuidle_state_is_coupled(dev, drv, index))
local_irq_enable();
diff = ktime_to_us(ktime_sub(time_end, time_start));
@@ -355,6 +355,8 @@ static void __cpuidle_unregister_device(struct cpuidle_device *dev)
list_del(&dev->device_list);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
module_put(drv->owner);
+
+ dev->registered = 0;
}
static void __cpuidle_device_init(struct cpuidle_device *dev)
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 4d18e27ffa9e..e87d12545754 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -244,7 +244,7 @@ static void caam_jr_dequeue(unsigned long devarg)
struct device *caam_jr_alloc(void)
{
struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
- struct device *dev = NULL;
+ struct device *dev = ERR_PTR(-ENODEV);
int min_tfm_cnt = INT_MAX;
int tfm_cnt;
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 4197ad9a711b..658fa533ced1 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -149,7 +149,6 @@
/**
* struct samsung_aes_variant - platform specific SSS driver data
- * @has_hash_irq: true if SSS module uses hash interrupt, false otherwise
* @aes_offset: AES register offset from SSS module's base.
*
* Specifies platform specific configuration of SSS module.
@@ -157,7 +156,6 @@
* expansion of its usage.
*/
struct samsung_aes_variant {
- bool has_hash_irq;
unsigned int aes_offset;
};
@@ -178,7 +176,6 @@ struct s5p_aes_dev {
struct clk *clk;
void __iomem *ioaddr;
void __iomem *aes_ioaddr;
- int irq_hash;
int irq_fc;
struct ablkcipher_request *req;
@@ -197,12 +194,10 @@ struct s5p_aes_dev {
static struct s5p_aes_dev *s5p_dev;
static const struct samsung_aes_variant s5p_aes_data = {
- .has_hash_irq = true,
.aes_offset = 0x4000,
};
static const struct samsung_aes_variant exynos_aes_data = {
- .has_hash_irq = false,
.aes_offset = 0x200,
};
@@ -313,43 +308,55 @@ static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
return err;
}
-static void s5p_aes_tx(struct s5p_aes_dev *dev)
+/*
+ * Returns true if new transmitting (output) data is ready and its
+ * address+length have to be written to device (by calling
+ * s5p_set_dma_outdata()). False otherwise.
+ */
+static bool s5p_aes_tx(struct s5p_aes_dev *dev)
{
int err = 0;
+ bool ret = false;
s5p_unset_outdata(dev);
if (!sg_is_last(dev->sg_dst)) {
err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
- if (err) {
+ if (err)
s5p_aes_complete(dev, err);
- return;
- }
-
- s5p_set_dma_outdata(dev, dev->sg_dst);
+ else
+ ret = true;
} else {
s5p_aes_complete(dev, err);
dev->busy = true;
tasklet_schedule(&dev->tasklet);
}
+
+ return ret;
}
-static void s5p_aes_rx(struct s5p_aes_dev *dev)
+/*
+ * Returns true if new receiving (input) data is ready and its
+ * address+length have to be written to device (by calling
+ * s5p_set_dma_indata()). False otherwise.
+ */
+static bool s5p_aes_rx(struct s5p_aes_dev *dev)
{
int err;
+ bool ret = false;
s5p_unset_indata(dev);
if (!sg_is_last(dev->sg_src)) {
err = s5p_set_indata(dev, sg_next(dev->sg_src));
- if (err) {
+ if (err)
s5p_aes_complete(dev, err);
- return;
- }
-
- s5p_set_dma_indata(dev, dev->sg_src);
+ else
+ ret = true;
}
+
+ return ret;
}
static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
@@ -358,18 +365,29 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
uint32_t status;
unsigned long flags;
+ bool set_dma_tx = false;
+ bool set_dma_rx = false;
spin_lock_irqsave(&dev->lock, flags);
- if (irq == dev->irq_fc) {
- status = SSS_READ(dev, FCINTSTAT);
- if (status & SSS_FCINTSTAT_BRDMAINT)
- s5p_aes_rx(dev);
- if (status & SSS_FCINTSTAT_BTDMAINT)
- s5p_aes_tx(dev);
-
- SSS_WRITE(dev, FCINTPEND, status);
- }
+ status = SSS_READ(dev, FCINTSTAT);
+ if (status & SSS_FCINTSTAT_BRDMAINT)
+ set_dma_rx = s5p_aes_rx(dev);
+ if (status & SSS_FCINTSTAT_BTDMAINT)
+ set_dma_tx = s5p_aes_tx(dev);
+
+ SSS_WRITE(dev, FCINTPEND, status);
+
+ /*
+ * Writing length of DMA block (either receiving or transmitting)
+ * will start the operation immediately, so this should be done
+ * at the end (even after clearing pending interrupts to not miss the
+ * interrupt).
+ */
+ if (set_dma_tx)
+ s5p_set_dma_outdata(dev, dev->sg_dst);
+ if (set_dma_rx)
+ s5p_set_dma_indata(dev, dev->sg_src);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -671,21 +689,6 @@ static int s5p_aes_probe(struct platform_device *pdev)
goto err_irq;
}
- if (variant->has_hash_irq) {
- pdata->irq_hash = platform_get_irq(pdev, 1);
- if (pdata->irq_hash < 0) {
- err = pdata->irq_hash;
- dev_warn(dev, "hash interrupt is not available.\n");
- goto err_irq;
- }
- err = devm_request_irq(dev, pdata->irq_hash, s5p_aes_interrupt,
- IRQF_SHARED, pdev->name, pdev);
- if (err < 0) {
- dev_warn(dev, "hash interrupt is not available.\n");
- goto err_irq;
- }
- }
-
pdata->busy = false;
pdata->variant = variant;
pdata->dev = dev;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 88afc48c2ca7..3ed46c8cd2a0 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -531,6 +531,10 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
}
+ /* Set bits of CONFIG register with dynamic context switching */
+ if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
+ writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
+
return ret ? 0 : -ETIMEDOUT;
}
@@ -1399,9 +1403,6 @@ static int __init sdma_init(struct sdma_engine *sdma)
writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
- /* Set bits of CONFIG register with given context switching mode */
- writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);
-
/* Initializes channel's priorities */
sdma_set_channel_priority(&sdma->channel[0], 7);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e9a2827ad1c4..1463804b63f0 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1453,7 +1453,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
int n, int width, int height)
{
int c, o;
- struct drm_device *dev = fb_helper->dev;
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
@@ -1472,7 +1471,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (modes[n] == NULL)
return best_score;
- crtcs = kzalloc(dev->mode_config.num_connector *
+ crtcs = kzalloc(fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
if (!crtcs)
return best_score;
@@ -1518,7 +1517,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
if (score > best_score) {
best_score = score;
memcpy(best_crtcs, crtcs,
- dev->mode_config.num_connector *
+ fb_helper->connector_count *
sizeof(struct drm_fb_helper_crtc *));
}
}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 87885d8c06e8..4869117b69eb 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -85,7 +85,7 @@ static const char *const dsi_errors[] = {
"RX Prot Violation",
"HS Generic Write FIFO Full",
"LP Generic Write FIFO Full",
- "Generic Read Data Avail"
+ "Generic Read Data Avail",
"Special Packet Sent",
"Tearing Effect",
};
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index c64f1942e8de..993df2dded42 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2952,6 +2952,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+ memset(active, 0, sizeof(*active));
+
active->pipe_enabled = intel_crtc_active(crtc);
if (active->pipe_enabled) {
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 421e29e4cd81..5221450f9b57 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -895,9 +895,15 @@ static long uinput_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
#ifdef CONFIG_COMPAT
+
+#define UI_SET_PHYS_COMPAT _IOW(UINPUT_IOCTL_BASE, 108, compat_uptr_t)
+
static long uinput_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
+ if (cmd == UI_SET_PHYS_COMPAT)
+ cmd = UI_SET_PHYS;
+
return uinput_ioctl_handler(file, cmd, arg, compat_ptr(arg));
}
#endif
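
[Editor's note] The UI_SET_PHYS_COMPAT translation above is needed because UI_SET_PHYS is defined with _IOW(..., char *), so the size field baked into the ioctl number differs between 32-bit and 64-bit userspace. A quick user-space sketch (on a 64-bit build) that prints both encodings; the 32-bit value is emulated with a 4-byte type standing in for compat_uptr_t:

#include <linux/ioctl.h>
#include <stdint.h>
#include <stdio.h>

#define UINPUT_IOCTL_BASE 'U'   /* matches include/uapi/linux/uinput.h */

int main(void)
{
	/* Native 64-bit encoding: the size field is sizeof(char *) == 8. */
	unsigned int native = _IOW(UINPUT_IOCTL_BASE, 108, char *);
	/* What a 32-bit task computes: pointers are 4 bytes (compat_uptr_t). */
	unsigned int compat = _IOW(UINPUT_IOCTL_BASE, 108, uint32_t);

	printf("UI_SET_PHYS        = %#x\n", native);
	printf("UI_SET_PHYS_COMPAT = %#x\n", compat);
	return 0;
}
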
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 7cab6b93d2e6..62acb0ced45a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -285,6 +285,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
if (irqnr < 16) {
gic_write_eoir(irqnr);
#ifdef CONFIG_SMP
+ /*
+ * Unlike GICv2, we don't need an smp_rmb() here.
+ * The control dependency from gic_read_iar to
+ * the ISB in gic_write_eoir is enough to ensure
+ * that any shared data read by handle_IPI will
+ * be read after the ACK.
+ */
handle_IPI(irqnr, regs);
#else
WARN_ONCE(true, "Unexpected SGI received!\n");
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index fe3223b2c23d..d67507911f9f 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -283,6 +283,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
if (irqnr < 16) {
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
+ /*
+ * Ensure any shared data written by the CPU sending
+ * the IPI is read after we've read the ACK register
+ * on the GIC.
+ *
+ * Pairs with the write barrier in gic_raise_softirq
+ */
+ smp_rmb();
handle_IPI(irqnr, regs);
#endif
continue;
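
[Editor's note] The barrier pairing the new comment refers to can be sketched in user space with C11 fences. This is a rough analogue only, with invented names, not the kernel's code: the IPI sender publishes its payload before making the "interrupt" visible, and the receiver orders its "ack" before reading the payload, which is the role played by smp_wmb() in gic_raise_softirq() and by the smp_rmb() added here.

#include <stdatomic.h>
#include <stdbool.h>

static int shared_payload;          /* data the IPI handler will consume */
static atomic_bool ipi_pending;     /* stands in for the GIC pending/ACK state */

/* Sender side, cf. gic_raise_softirq(): publish the data, then raise the IPI. */
static void send_ipi(int value)
{
	shared_payload = value;
	atomic_thread_fence(memory_order_release);   /* analogue of smp_wmb() */
	atomic_store_explicit(&ipi_pending, true, memory_order_relaxed);
}

/* Receiver side, cf. gic_handle_irq(): observe/ack first, then read the data. */
static int receive_ipi(void)
{
	while (!atomic_load_explicit(&ipi_pending, memory_order_relaxed))
		;                                    /* poll the "ACK register" */
	atomic_thread_fence(memory_order_acquire);   /* analogue of smp_rmb() */
	return shared_payload;
}
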
diff --git a/drivers/mcb/mcb-parse.c b/drivers/mcb/mcb-parse.c
index 004926955263..b0155b05cddb 100644
--- a/drivers/mcb/mcb-parse.c
+++ b/drivers/mcb/mcb-parse.c
@@ -57,7 +57,7 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
mdev->id = GDD_DEV(reg1);
mdev->rev = GDD_REV(reg1);
mdev->var = GDD_VAR(reg1);
- mdev->bar = GDD_BAR(reg1);
+ mdev->bar = GDD_BAR(reg2);
mdev->group = GDD_GRP(reg2);
mdev->inst = GDD_INS(reg2);
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 532eacab6b46..0f8cd6bbe914 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -269,6 +269,8 @@ static int usbtll_omap_probe(struct platform_device *pdev)
if (IS_ERR(tll->ch_clk[i]))
dev_dbg(dev, "can't get clock : %s\n", clkname);
+ else
+ clk_prepare(tll->ch_clk[i]);
}
pm_runtime_put_sync(dev);
@@ -301,9 +303,12 @@ static int usbtll_omap_remove(struct platform_device *pdev)
tll_dev = NULL;
spin_unlock(&tll_lock);
- for (i = 0; i < tll->nch; i++)
- if (!IS_ERR(tll->ch_clk[i]))
+ for (i = 0; i < tll->nch; i++) {
+ if (!IS_ERR(tll->ch_clk[i])) {
+ clk_unprepare(tll->ch_clk[i]);
clk_put(tll->ch_clk[i]);
+ }
+ }
pm_runtime_disable(&pdev->dev);
return 0;
@@ -421,7 +426,7 @@ int omap_tll_enable(struct usbhs_omap_platform_data *pdata)
if (IS_ERR(tll->ch_clk[i]))
continue;
- r = clk_prepare_enable(tll->ch_clk[i]);
+ r = clk_enable(tll->ch_clk[i]);
if (r) {
dev_err(tll_dev,
"Error enabling ch %d clock: %d\n", i, r);
@@ -449,7 +454,7 @@ int omap_tll_disable(struct usbhs_omap_platform_data *pdata)
for (i = 0; i < tll->nch; i++) {
if (omap_usb_mode_needs_tll(pdata->port_mode[i])) {
if (!IS_ERR(tll->ch_clk[i]))
- clk_disable_unprepare(tll->ch_clk[i]);
+ clk_disable(tll->ch_clk[i]);
}
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 10ecc0a0112b..5027ae76fde8 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -2410,11 +2410,12 @@ static const struct mmc_fixup blk_fixups[] =
MMC_QUIRK_BLK_NO_CMD23),
/*
- * Some Micron MMC cards needs longer data read timeout than
- * indicated in CSD.
+ * Some MMC cards need longer data read timeout than indicated in CSD.
*/
MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
MMC_QUIRK_LONG_READ_TIME),
+ MMC_FIXUP("008GE0", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
/*
* On these Samsung MoviNAND parts, performing secure erase or
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 01315db9a81a..eefe36afa601 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -811,11 +811,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
/*
* Some cards require longer data read timeout than indicated in CSD.
* Address this by setting the read timeout to a "reasonably high"
- * value. For the cards tested, 300ms has proven enough. If necessary,
+ * value. For the cards tested, 600ms has proven enough. If necessary,
* this value can be increased if other problematic cards require this.
*/
if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
- data->timeout_ns = 300000000;
+ data->timeout_ns = 600000000;
data->timeout_clks = 0;
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 66c5c9f313a0..449aa7a4227d 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -388,6 +388,9 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
}
}
+/* Minimum partition switch timeout in milliseconds */
+#define MMC_MIN_PART_SWITCH_TIME 300
+
/*
* Decode extended CSD.
*/
@@ -450,6 +453,10 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
/* EXT_CSD value is in units of 10ms, but we store in ms */
card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
+ /* Some eMMC set the value too low so set a minimum */
+ if (card->ext_csd.part_time &&
+ card->ext_csd.part_time < MMC_MIN_PART_SWITCH_TIME)
+ card->ext_csd.part_time = MMC_MIN_PART_SWITCH_TIME;
/* Sleep / awake timeout in 100ns units */
if (sa_shift > 0 && sa_shift <= 0x17)
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 9cccc0e89b04..1de7056ccd92 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -180,7 +180,8 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
.chip = &sdhci_acpi_chip_int,
.caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
- MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR,
+ MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR |
+ MMC_CAP_WAIT_WHILE_BUSY,
.caps2 = MMC_CAP2_HC_ERASE_SZ,
.flags = SDHCI_ACPI_RUNTIME_PM,
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC,
@@ -190,7 +191,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = {
.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION,
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON,
- .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD,
+ .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD |
+ MMC_CAP_WAIT_WHILE_BUSY,
.flags = SDHCI_ACPI_RUNTIME_PM,
.pm_caps = MMC_PM_KEEP_POWER,
.probe_slot = sdhci_acpi_sdio_probe_slot,
@@ -201,6 +203,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = {
SDHCI_ACPI_RUNTIME_PM,
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
SDHCI_QUIRK2_STOP_WITH_TC,
+ .caps = MMC_CAP_WAIT_WHILE_BUSY,
.probe_slot = sdhci_acpi_sd_probe_slot,
};
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 493f7b3dbc33..0aa7087438fa 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -425,8 +425,27 @@ retry:
ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
pnum, vol_id, lnum);
err = -EBADMSG;
- } else
- ubi_ro_mode(ubi);
+ } else {
+ /*
+ * Ending up here in the non-Fastmap case
+ * is a clear bug as the VID header had to
+ * be present at scan time to have it referenced.
+ * With fastmap the story is more complicated.
+ * Fastmap has the mapping info without the need
+ * of a full scan. So the LEB could have been
+ * unmapped, Fastmap cannot know this and keeps
+ * the LEB referenced.
+ * This is valid and works as the layer above UBI
+ * has to do bookkeeping about used/referenced
+ * LEBs in any case.
+ */
+ if (ubi->fast_attach) {
+ err = -EBADMSG;
+ } else {
+ err = -EINVAL;
+ ubi_ro_mode(ubi);
+ }
+ }
}
goto out_free;
} else if (err == UBI_IO_BITFLIPS)
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index cfd5b5e90156..9d38605717a0 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1071,6 +1071,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
ubi->fm_disabled = 0;
+ ubi->fast_attach = 1;
ubi_free_vid_hdr(ubi, vh);
kfree(ech);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 320fc38fa2a1..206607668ee4 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -426,6 +426,8 @@ struct ubi_debug_info {
* @fm_size: fastmap size in bytes
* @fm_sem: allows ubi_update_fastmap() to block EBA table changes
* @fm_work: fastmap work queue
+ * @fm_work_scheduled: non-zero if fastmap work was scheduled
+ * @fast_attach: non-zero if UBI was attached by fastmap
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -437,7 +439,7 @@ struct ubi_debug_info {
* @pq_head: protection queue head
* @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
* @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
- * @erroneous, and @erroneous_peb_count fields
+ * @erroneous, @erroneous_peb_count, and @fm_work_scheduled fields
* @move_mutex: serializes eraseblock moves
* @work_sem: used to wait for all the scheduled works to finish and prevent
* new works from being submitted
@@ -532,6 +534,8 @@ struct ubi_device {
void *fm_buf;
size_t fm_size;
struct work_struct fm_work;
+ int fm_work_scheduled;
+ int fast_attach;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 21d03130d8a7..e9b82816b8ce 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -149,6 +149,9 @@ static void update_fastmap_work_fn(struct work_struct *wrk)
{
struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
ubi_update_fastmap(ubi);
+ spin_lock(&ubi->wl_lock);
+ ubi->fm_work_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
}
/**
@@ -657,7 +660,10 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
/* We cannot update the fastmap here because this
* function is called in atomic context.
* Let's fail here and refill/update it as soon as possible. */
- schedule_work(&ubi->fm_work);
+ if (!ubi->fm_work_scheduled) {
+ ubi->fm_work_scheduled = 1;
+ schedule_work(&ubi->fm_work);
+ }
return NULL;
} else {
pnum = pool->pebs[pool->used++];
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 573b53b38af4..80185ebc7a43 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -615,11 +615,17 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
/* allow change of MTU according to the CANFD ability of the device */
switch (new_mtu) {
case CAN_MTU:
+ /* 'CANFD-only' controllers can not switch to CAN_MTU */
+ if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
+ return -EINVAL;
+
priv->ctrlmode &= ~CAN_CTRLMODE_FD;
break;
case CANFD_MTU:
- if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD))
+ /* check for potential CANFD ability */
+ if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
+ !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
return -EINVAL;
priv->ctrlmode |= CAN_CTRLMODE_FD;
@@ -701,6 +707,35 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
= { .len = sizeof(struct can_bittiming_const) },
};
+static int can_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ bool is_can_fd = false;
+
+ /* Make sure that valid CAN FD configurations always consist of
+ * - nominal/arbitration bittiming
+ * - data bittiming
+ * - control mode with CAN_CTRLMODE_FD set
+ */
+
+ if (data[IFLA_CAN_CTRLMODE]) {
+ struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+
+ is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+ }
+
+ if (is_can_fd) {
+ if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
+ return -EOPNOTSUPP;
+ }
+
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static int can_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
@@ -732,19 +767,31 @@ static int can_changelink(struct net_device *dev,
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm;
+ u32 ctrlstatic;
+ u32 maskedflags;
/* Do not allow changing controller mode while running */
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ ctrlstatic = priv->ctrlmode_static;
+ maskedflags = cm->flags & cm->mask;
+
+ /* check whether provided bits are allowed to be passed */
+ if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
+ return -EOPNOTSUPP;
+
+ /* do not check for static fd-non-iso if 'fd' is disabled */
+ if (!(maskedflags & CAN_CTRLMODE_FD))
+ ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
- /* check whether changed bits are allowed to be modified */
- if (cm->mask & ~priv->ctrlmode_supported)
+ /* make sure static options are provided by configuration */
+ if ((maskedflags & ctrlstatic) != ctrlstatic)
return -EOPNOTSUPP;
/* clear bits to be modified and copy the flag values */
priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= (cm->flags & cm->mask);
+ priv->ctrlmode |= maskedflags;
/* CAN_CTRLMODE_FD can only be set when driver supports FD */
if (priv->ctrlmode & CAN_CTRLMODE_FD)
@@ -885,6 +932,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
.maxtype = IFLA_CAN_MAX,
.policy = can_policy,
.setup = can_setup,
+ .validate = can_validate,
.newlink = can_newlink,
.changelink = can_changelink,
.get_size = can_get_size,
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 244529881be9..62143cd7018f 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -957,7 +957,7 @@ static struct net_device *alloc_m_can_dev(void)
priv->can.do_get_berr_counter = m_can_get_berr_counter;
/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
- priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+ can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
/* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 0beb7e7d6075..7349b05d6a7d 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -77,7 +77,7 @@ static const struct pci_device_id ath5k_led_devices[] = {
/* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
/* HP Compaq C700 (nitrousnrg@gmail.com) */
- { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
+ { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) },
/* LiteOn AR5BXB63 (magooz@salug.it) */
{ ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
/* IBM-specific AR5212 (all others) */
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index c018dea0b2e8..5ca08edb3988 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -28,6 +28,16 @@ static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
+
+#ifdef CONFIG_ATH9K_PCOEM
+ /* Mini PCI AR9220 MB92 cards: Compex WLM200NX, Wistron DNMA-92 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0029,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x2096),
+ .driver_data = ATH9K_PCI_LED_ACT_HI },
+#endif
+
{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index af2486965782..339f94e72555 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -1587,9 +1587,9 @@ void rtl_watchdog_wq_callback(void *data)
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2))
- rtl_lps_enter(hw);
- else
rtl_lps_leave(hw);
+ else
+ rtl_lps_enter(hw);
}
rtlpriv->link_info.num_rx_inperiod = 0;
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 348d5aec7682..2afe4cc161b5 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -1573,7 +1573,7 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
true,
HW_DESC_TXBUFF_ADDR),
skb->len, PCI_DMA_TODEVICE);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
ring->idx = 0;
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 775e7bc292f2..24a855238d13 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -93,7 +93,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
rtl8723be_bt_reg_init(hw);
- rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
rtlpriv->dm.dm_initialgain_enable = 1;
@@ -151,6 +150,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
+ rtlpriv->cfg->mod_params->sw_crypto =
+ rtlpriv->cfg->mod_params->sw_crypto;
+ rtlpriv->cfg->mod_params->disable_watchdog =
+ rtlpriv->cfg->mod_params->disable_watchdog;
if (rtlpriv->cfg->mod_params->disable_watchdog)
pr_info("watchdog disabled\n");
rtlpriv->psc.reg_fwctrl_lps = 3;
@@ -267,6 +270,9 @@ static struct rtl_mod_params rtl8723be_mod_params = {
.inactiveps = true,
.swctrl_lps = false,
.fwctrl_lps = true,
+ .msi_support = false,
+ .disable_watchdog = false,
+ .debug = DBG_EMERG,
};
static struct rtl_hal_cfg rtl8723be_hal_cfg = {
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f64d5756142c..d5c10c5cadb4 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -178,9 +178,6 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct pci_bus_region region, inverted_region;
bool bar_too_big = false, bar_too_high = false, bar_invalid = false;
- if (dev->non_compliant_bars)
- return 0;
-
mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
/* No printks while decoding is disabled! */
@@ -332,6 +329,9 @@ static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
unsigned int pos, reg;
+ if (dev->non_compliant_bars)
+ return;
+
for (pos = 0; pos < howmany; pos++) {
struct resource *res = &dev->resource[pos];
reg = PCI_BASE_ADDRESS_0 + (pos << 2);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos5440.c b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
index 88acfc0efd54..9a77a2a06b0f 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos5440.c
@@ -109,6 +109,7 @@ struct exynos5440_pmx_func {
* @nr_groups: number of pin groups available.
* @pmx_functions: list of pin functions parsed from device tree.
* @nr_functions: number of pin functions available.
+ * @range: gpio range to register with pinctrl
*/
struct exynos5440_pinctrl_priv_data {
void __iomem *reg_base;
@@ -119,6 +120,7 @@ struct exynos5440_pinctrl_priv_data {
unsigned int nr_groups;
const struct exynos5440_pmx_func *pmx_functions;
unsigned int nr_functions;
+ struct pinctrl_gpio_range range;
};
/**
@@ -769,7 +771,6 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev,
struct pinctrl_desc *ctrldesc;
struct pinctrl_dev *pctl_dev;
struct pinctrl_pin_desc *pindesc, *pdesc;
- struct pinctrl_gpio_range grange;
char *pin_names;
int pin, ret;
@@ -827,12 +828,12 @@ static int exynos5440_pinctrl_register(struct platform_device *pdev,
return -EINVAL;
}
- grange.name = "exynos5440-pctrl-gpio-range";
- grange.id = 0;
- grange.base = 0;
- grange.npins = EXYNOS5440_MAX_PINS;
- grange.gc = priv->gc;
- pinctrl_add_gpio_range(pctl_dev, &grange);
+ priv->range.name = "exynos5440-pctrl-gpio-range";
+ priv->range.id = 0;
+ priv->range.base = 0;
+ priv->range.npins = EXYNOS5440_MAX_PINS;
+ priv->range.gc = priv->gc;
+ pinctrl_add_gpio_range(pctl_dev, &priv->range);
return 0;
}
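Review note: the pinctrl change works because pinctrl_add_gpio_range() stores the pointer it is given; registering a struct that lives on the probe path's stack leaves the subsystem holding a dangling pointer once the function returns, so the range now lives in the long-lived private data. A minimal sketch of that ownership rule, in plain C with a hypothetical registry rather than the pinctrl API:

#include <stdio.h>
#include <stdlib.h>

struct range { int base; int npins; };

static struct range *registered;	/* the registry only keeps a pointer */

static void register_range(struct range *r)
{
	registered = r;
}

struct priv {				/* long-lived driver data */
	struct range range;
};

int main(void)
{
	struct priv *priv = malloc(sizeof(*priv));

	if (!priv)
		return 1;
	priv->range.base = 0;
	priv->range.npins = 160;
	register_range(&priv->range);	/* stays valid as long as priv does */

	printf("range base=%d npins=%d\n", registered->base, registered->npins);
	free(priv);			/* real code would unregister first */
	return 0;
}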
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index bc3d80f9d5d6..872e53f15590 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -110,6 +110,11 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
struct device *dev,
const char *supply_name);
+static struct regulator_dev *dev_to_rdev(struct device *dev)
+{
+ return container_of(dev, struct regulator_dev, dev);
+}
+
static const char *rdev_get_name(struct regulator_dev *rdev)
{
if (rdev->constraints && rdev->constraints->name)
@@ -3994,13 +3999,57 @@ static int __init regulator_init(void)
/* init early to allow our consumers to complete system booting */
core_initcall(regulator_init);
-static int __init regulator_init_complete(void)
+static int __init regulator_late_cleanup(struct device *dev, void *data)
{
- struct regulator_dev *rdev;
- const struct regulator_ops *ops;
- struct regulation_constraints *c;
+ struct regulator_dev *rdev = dev_to_rdev(dev);
+ const struct regulator_ops *ops = rdev->desc->ops;
+ struct regulation_constraints *c = rdev->constraints;
int enabled, ret;
+ if (c && c->always_on)
+ return 0;
+
+ if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
+ return 0;
+
+ mutex_lock(&rdev->mutex);
+
+ if (rdev->use_count)
+ goto unlock;
+
+ /* If we can't read the status assume it's on. */
+ if (ops->is_enabled)
+ enabled = ops->is_enabled(rdev);
+ else
+ enabled = 1;
+
+ if (!enabled)
+ goto unlock;
+
+ if (have_full_constraints()) {
+ /* We log since this may kill the system if it goes
+ * wrong. */
+ rdev_info(rdev, "disabling\n");
+ ret = _regulator_do_disable(rdev);
+ if (ret != 0)
+ rdev_err(rdev, "couldn't disable: %d\n", ret);
+ } else {
+ /* The intention is that in future we will
+ * assume that full constraints are provided
+ * so warn even if we aren't going to do
+ * anything here.
+ */
+ rdev_warn(rdev, "incomplete constraints, leaving on\n");
+ }
+
+unlock:
+ mutex_unlock(&rdev->mutex);
+
+ return 0;
+}
+
+static int __init regulator_init_complete(void)
+{
/*
* Since DT doesn't provide an idiomatic mechanism for
* enabling full constraints and since it's much more natural
@@ -4010,58 +4059,13 @@ static int __init regulator_init_complete(void)
if (of_have_populated_dt())
has_full_constraints = true;
- mutex_lock(&regulator_list_mutex);
-
/* If we have a full configuration then disable any regulators
* we have permission to change the status for and which are
* not in use or always_on. This is effectively the default
* for DT and ACPI as they have full constraints.
*/
- list_for_each_entry(rdev, &regulator_list, list) {
- ops = rdev->desc->ops;
- c = rdev->constraints;
-
- if (c && c->always_on)
- continue;
-
- if (c && !(c->valid_ops_mask & REGULATOR_CHANGE_STATUS))
- continue;
-
- mutex_lock(&rdev->mutex);
-
- if (rdev->use_count)
- goto unlock;
-
- /* If we can't read the status assume it's on. */
- if (ops->is_enabled)
- enabled = ops->is_enabled(rdev);
- else
- enabled = 1;
-
- if (!enabled)
- goto unlock;
-
- if (have_full_constraints()) {
- /* We log since this may kill the system if it
- * goes wrong. */
- rdev_info(rdev, "disabling\n");
- ret = _regulator_do_disable(rdev);
- if (ret != 0)
- rdev_err(rdev, "couldn't disable: %d\n", ret);
- } else {
- /* The intention is that in future we will
- * assume that full constraints are provided
- * so warn even if we aren't going to do
- * anything here.
- */
- rdev_warn(rdev, "incomplete constraints, leaving on\n");
- }
-
-unlock:
- mutex_unlock(&rdev->mutex);
- }
-
- mutex_unlock(&regulator_list_mutex);
+ class_for_each_device(&regulator_class, NULL, NULL,
+ regulator_late_cleanup);
return 0;
}
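Review note: two patterns in the regulator hunk are worth calling out: dev_to_rdev() recovers the regulator_dev from the embedded struct device with container_of(), and the late cleanup now runs as a class_for_each_device() callback instead of walking regulator_list under regulator_list_mutex. A user-space sketch of the container_of() step, with trimmed-down hypothetical types, not the regulator API:

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of(): recover the outer struct from a pointer to one
 * of its members, as dev_to_rdev() does for the embedded struct device. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { const char *name; };

struct regulator_dev {		/* hypothetical, heavily trimmed */
	int use_count;
	struct device dev;	/* member handed to per-device callbacks */
};

static struct regulator_dev *dev_to_rdev(struct device *dev)
{
	return container_of(dev, struct regulator_dev, dev);
}

int main(void)
{
	struct regulator_dev rdev = { .use_count = 2, .dev = { "vcc" } };
	struct device *dev = &rdev.dev;		/* what the iterator passes */

	printf("%s use_count=%d\n", dev->name, dev_to_rdev(dev)->use_count);
	return 0;
}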
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 6b32ddcefc11..ce177a50ec05 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -590,10 +590,10 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
}
return -EFAULT;
}
- /* We used to udelay() here but that absorbed
- * a CPU when a timeout occured. Not very
- * useful. */
- cpu_relax();
+ /*
+ * Allow other processes / CPUS to use core
+ */
+ schedule();
}
} else if (down_interruptible(&fibptr->event_wait)) {
/* Do nothing ... satisfy
@@ -1921,6 +1921,10 @@ int aac_command_thread(void *data)
if (difference <= 0)
difference = 1;
set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_stop())
+ break;
+
schedule_timeout(difference);
if (kthread_should_stop())
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 0dde34e3a7c5..545c60c826a1 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -444,6 +444,7 @@ int tb_drom_read(struct tb_switch *sw)
return tb_drom_parse_entries(sw);
err:
kfree(sw->drom);
+ sw->drom = NULL;
return -EIO;
}
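Review note: clearing sw->drom after kfree() on the error path means a later retry or release sees a NULL pointer instead of a stale one, so it can neither dereference freed memory nor free it twice. A tiny sketch of the free-and-NULL idiom in plain C, with a hypothetical struct standing in for tb_switch:

#include <stdlib.h>

struct sw { char *drom; };

static void read_failed(struct sw *sw)
{
	free(sw->drom);
	sw->drom = NULL;	/* retrying or releasing again is now harmless */
}

int main(void)
{
	struct sw sw = { .drom = malloc(256) };

	read_failed(&sw);
	read_failed(&sw);	/* free(NULL) is a no-op, no double free */
	return 0;
}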
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index bce16e405d59..db37ee49c3bf 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -2045,7 +2045,9 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm)
}
}
spin_unlock(&gsm_mux_lock);
- WARN_ON(i == MAX_MUX);
+ /* open failed before registering => nothing to do */
+ if (i == MAX_MUX)
+ return;
/* In theory disconnecting DLCI 0 is sufficient but for some
modems this is apparently not the case. */
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index c107a0f0e72f..c859eab47bd8 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1478,6 +1478,9 @@ static struct of_device_id ucc_uart_match[] = {
.type = "serial",
.compatible = "ucc_uart",
},
+ {
+ .compatible = "fsl,t1040-ucc-uart",
+ },
{},
};
MODULE_DEVICE_TABLE(of, ucc_uart_match);
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 66be3b43de9f..3c6adc16b34f 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -283,7 +283,7 @@ static int usb_probe_interface(struct device *dev)
struct usb_device *udev = interface_to_usbdev(intf);
const struct usb_device_id *id;
int error = -ENODEV;
- int lpm_disable_error;
+ int lpm_disable_error = -ENODEV;
dev_dbg(dev, "%s\n", __func__);
@@ -331,12 +331,14 @@ static int usb_probe_interface(struct device *dev)
* setting during probe, that should also be fine. usb_set_interface()
* will attempt to disable LPM, and fail if it can't disable it.
*/
- lpm_disable_error = usb_unlocked_disable_lpm(udev);
- if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
- dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
- __func__, driver->name);
- error = lpm_disable_error;
- goto err;
+ if (driver->disable_hub_initiated_lpm) {
+ lpm_disable_error = usb_unlocked_disable_lpm(udev);
+ if (lpm_disable_error) {
+ dev_err(&intf->dev, "%s Failed to disable LPM for driver %s\n.",
+ __func__, driver->name);
+ error = lpm_disable_error;
+ goto err;
+ }
}
/* Carry out a deferred switch to altsetting 0 */
@@ -386,7 +388,8 @@ static int usb_unbind_interface(struct device *dev)
struct usb_interface *intf = to_usb_interface(dev);
struct usb_host_endpoint *ep, **eps = NULL;
struct usb_device *udev;
- int i, j, error, r, lpm_disable_error;
+ int i, j, error, r;
+ int lpm_disable_error = -ENODEV;
intf->condition = USB_INTERFACE_UNBINDING;
@@ -394,12 +397,13 @@ static int usb_unbind_interface(struct device *dev)
udev = interface_to_usbdev(intf);
error = usb_autoresume_device(udev);
- /* Hub-initiated LPM policy may change, so attempt to disable LPM until
+ /* If hub-initiated LPM policy may change, attempt to disable LPM until
* the driver is unbound. If LPM isn't disabled, that's fine because it
* wouldn't be enabled unless all the bound interfaces supported
* hub-initiated LPM.
*/
- lpm_disable_error = usb_unlocked_disable_lpm(udev);
+ if (driver->disable_hub_initiated_lpm)
+ lpm_disable_error = usb_unlocked_disable_lpm(udev);
/*
* Terminate all URBs for this interface unless the driver
@@ -502,7 +506,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
struct device *dev;
struct usb_device *udev;
int retval = 0;
- int lpm_disable_error;
+ int lpm_disable_error = -ENODEV;
if (!iface)
return -ENODEV;
@@ -519,12 +523,14 @@ int usb_driver_claim_interface(struct usb_driver *driver,
iface->condition = USB_INTERFACE_BOUND;
- /* Disable LPM until this driver is bound. */
- lpm_disable_error = usb_unlocked_disable_lpm(udev);
- if (lpm_disable_error && driver->disable_hub_initiated_lpm) {
- dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
- __func__, driver->name);
- return -ENOMEM;
+ /* See the comment about disabling LPM in usb_probe_interface(). */
+ if (driver->disable_hub_initiated_lpm) {
+ lpm_disable_error = usb_unlocked_disable_lpm(udev);
+ if (lpm_disable_error) {
+ dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
+ __func__, driver->name);
+ return -ENOMEM;
+ }
}
/* Claimed interfaces are initially inactive (suspended) and
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 0009fc847eee..87cc0654b49e 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -915,7 +915,7 @@ static void usb_bus_init (struct usb_bus *bus)
bus->bandwidth_allocated = 0;
bus->bandwidth_int_reqs = 0;
bus->bandwidth_isoc_reqs = 0;
- mutex_init(&bus->usb_address0_mutex);
+ mutex_init(&bus->devnum_next_mutex);
INIT_LIST_HEAD (&bus->bus_list);
}
@@ -2447,6 +2447,14 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
return NULL;
}
if (primary_hcd == NULL) {
+ hcd->address0_mutex = kmalloc(sizeof(*hcd->address0_mutex),
+ GFP_KERNEL);
+ if (!hcd->address0_mutex) {
+ kfree(hcd);
+ dev_dbg(dev, "hcd address0 mutex alloc failed\n");
+ return NULL;
+ }
+ mutex_init(hcd->address0_mutex);
hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
GFP_KERNEL);
if (!hcd->bandwidth_mutex) {
@@ -2458,6 +2466,7 @@ struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
dev_set_drvdata(dev, hcd);
} else {
mutex_lock(&usb_port_peer_mutex);
+ hcd->address0_mutex = primary_hcd->address0_mutex;
hcd->bandwidth_mutex = primary_hcd->bandwidth_mutex;
hcd->primary_hcd = primary_hcd;
primary_hcd->primary_hcd = primary_hcd;
@@ -2524,8 +2533,10 @@ static void hcd_release(struct kref *kref)
struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
mutex_lock(&usb_port_peer_mutex);
- if (usb_hcd_is_primary_hcd(hcd))
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ kfree(hcd->address0_mutex);
kfree(hcd->bandwidth_mutex);
+ }
if (hcd->shared_hcd) {
struct usb_hcd *peer = hcd->shared_hcd;
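Review note: address0_mutex follows the same ownership model as bandwidth_mutex: the primary HCD allocates it, a shared HCD only copies the pointer, and hcd_release() frees it only for the primary, so the peer never destroys a mutex it does not own. A small sketch of that shared-ownership pattern in plain C with pthreads and hypothetical types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct hcd {
	int primary;
	pthread_mutex_t *address0_mutex;	/* heap object shared by pointer */
};

static struct hcd *create_hcd(struct hcd *primary)
{
	struct hcd *hcd = calloc(1, sizeof(*hcd));

	if (!hcd)
		return NULL;
	if (!primary) {
		hcd->primary = 1;
		hcd->address0_mutex = malloc(sizeof(*hcd->address0_mutex));
		if (!hcd->address0_mutex) {
			free(hcd);
			return NULL;
		}
		pthread_mutex_init(hcd->address0_mutex, NULL);
	} else {
		hcd->address0_mutex = primary->address0_mutex;	/* share it */
	}
	return hcd;
}

static void release_hcd(struct hcd *hcd)
{
	if (hcd->primary) {			/* only the owner frees */
		pthread_mutex_destroy(hcd->address0_mutex);
		free(hcd->address0_mutex);
	}
	free(hcd);
}

int main(void)
{
	struct hcd *usb2 = create_hcd(NULL);
	struct hcd *usb3 = usb2 ? create_hcd(usb2) : NULL;

	if (!usb2 || !usb3)
		return 1;
	pthread_mutex_lock(usb2->address0_mutex);
	puts("enumerating under the shared address0 mutex");
	pthread_mutex_unlock(usb2->address0_mutex);

	release_hcd(usb3);	/* peer goes first, mutex stays intact */
	release_hcd(usb2);
	return 0;
}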
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 74d856c7522b..ea9475fe20df 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2066,7 +2066,7 @@ static void choose_devnum(struct usb_device *udev)
struct usb_bus *bus = udev->bus;
/* be safe when more hub events are proceed in parallel */
- mutex_lock(&bus->usb_address0_mutex);
+ mutex_lock(&bus->devnum_next_mutex);
if (udev->wusb) {
devnum = udev->portnum + 1;
BUG_ON(test_bit(devnum, bus->devmap.devicemap));
@@ -2084,7 +2084,7 @@ static void choose_devnum(struct usb_device *udev)
set_bit(devnum, bus->devmap.devicemap);
udev->devnum = devnum;
}
- mutex_unlock(&bus->usb_address0_mutex);
+ mutex_unlock(&bus->devnum_next_mutex);
}
static void release_devnum(struct usb_device *udev)
@@ -4262,7 +4262,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
if (oldspeed == USB_SPEED_LOW)
delay = HUB_LONG_RESET_TIME;
- mutex_lock(&hdev->bus->usb_address0_mutex);
+ mutex_lock(hcd->address0_mutex);
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
@@ -4544,7 +4544,7 @@ fail:
hub_port_disable(hub, port1, 0);
update_devnum(udev, devnum); /* for disconnect processing */
}
- mutex_unlock(&hdev->bus->usb_address0_mutex);
+ mutex_unlock(hcd->address0_mutex);
return retval;
}
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 0bbafe795a72..bbddc44ce8bc 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -303,11 +303,20 @@ static unsigned mod_pattern;
module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
-static inline void simple_fill_buf(struct urb *urb)
+static unsigned get_maxpacket(struct usb_device *udev, int pipe)
+{
+ struct usb_host_endpoint *ep;
+
+ ep = usb_pipe_endpoint(udev, pipe);
+ return le16_to_cpup(&ep->desc.wMaxPacketSize);
+}
+
+static void simple_fill_buf(struct urb *urb)
{
unsigned i;
u8 *buf = urb->transfer_buffer;
unsigned len = urb->transfer_buffer_length;
+ unsigned maxpacket;
switch (pattern) {
default:
@@ -316,8 +325,9 @@ static inline void simple_fill_buf(struct urb *urb)
memset(buf, 0, len);
break;
case 1: /* mod63 */
+ maxpacket = get_maxpacket(urb->dev, urb->pipe);
for (i = 0; i < len; i++)
- *buf++ = (u8) (i % 63);
+ *buf++ = (u8) ((i % maxpacket) % 63);
break;
}
}
@@ -349,6 +359,7 @@ static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
u8 expected;
u8 *buf = urb->transfer_buffer;
unsigned len = urb->actual_length;
+ unsigned maxpacket = get_maxpacket(urb->dev, urb->pipe);
int ret = check_guard_bytes(tdev, urb);
if (ret)
@@ -366,7 +377,7 @@ static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
* with set_interface or set_config.
*/
case 1: /* mod63 */
- expected = i % 63;
+ expected = (i % maxpacket) % 63;
break;
/* always fail unsupported patterns */
default:
@@ -478,11 +489,14 @@ static void free_sglist(struct scatterlist *sg, int nents)
}
static struct scatterlist *
-alloc_sglist(int nents, int max, int vary)
+alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe)
{
struct scatterlist *sg;
+ unsigned int n_size = 0;
unsigned i;
unsigned size = max;
+ unsigned maxpacket =
+ get_maxpacket(interface_to_usbdev(dev->intf), pipe);
if (max == 0)
return NULL;
@@ -511,7 +525,8 @@ alloc_sglist(int nents, int max, int vary)
break;
case 1:
for (j = 0; j < size; j++)
- *buf++ = (u8) (j % 63);
+ *buf++ = (u8) (((j + n_size) % maxpacket) % 63);
+ n_size += size;
break;
}
@@ -2175,7 +2190,8 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
"TEST 5: write %d sglists %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
- sg = alloc_sglist(param->sglen, param->length, 0);
+ sg = alloc_sglist(param->sglen, param->length,
+ 0, dev, dev->out_pipe);
if (!sg) {
retval = -ENOMEM;
break;
@@ -2193,7 +2209,8 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
"TEST 6: read %d sglists %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
- sg = alloc_sglist(param->sglen, param->length, 0);
+ sg = alloc_sglist(param->sglen, param->length,
+ 0, dev, dev->in_pipe);
if (!sg) {
retval = -ENOMEM;
break;
@@ -2210,7 +2227,8 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
"TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
param->vary, param->iterations,
param->sglen, param->length);
- sg = alloc_sglist(param->sglen, param->length, param->vary);
+ sg = alloc_sglist(param->sglen, param->length,
+ param->vary, dev, dev->out_pipe);
if (!sg) {
retval = -ENOMEM;
break;
@@ -2227,7 +2245,8 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
"TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
param->vary, param->iterations,
param->sglen, param->length);
- sg = alloc_sglist(param->sglen, param->length, param->vary);
+ sg = alloc_sglist(param->sglen, param->length,
+ param->vary, dev, dev->in_pipe);
if (!sg) {
retval = -ENOMEM;
break;
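Review note: the usbtest change computes the mod63 pattern relative to wMaxPacketSize, i.e. (i % maxpacket) % 63, so the byte sequence restarts at every packet boundary instead of running continuously across the whole transfer, and the scatterlist variant carries n_size so later entries keep the same alignment. A stand-alone sketch of the generator in plain C; the maxpacket value here is illustrative, not read from an endpoint descriptor:

#include <stdio.h>

int main(void)
{
	const unsigned int maxpacket = 8;	/* small so the restart is visible */
	const unsigned int len = 20;
	unsigned char buf[20];
	unsigned int i;

	for (i = 0; i < len; i++)
		buf[i] = (unsigned char)((i % maxpacket) % 63);

	for (i = 0; i < len; i++)
		printf("%u ", buf[i]);		/* 0..7 0..7 0..3 */
	printf("\n");
	return 0;
}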
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 642125d27df4..685a90168d8e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -43,8 +43,8 @@ static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int);
static int cp210x_tiocmset_port(struct usb_serial_port *port,
unsigned int, unsigned int);
static void cp210x_break_ctl(struct tty_struct *, int);
-static int cp210x_startup(struct usb_serial *);
-static void cp210x_release(struct usb_serial *);
+static int cp210x_port_probe(struct usb_serial_port *);
+static int cp210x_port_remove(struct usb_serial_port *);
static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
static const struct usb_device_id id_table[] = {
@@ -207,7 +207,7 @@ static const struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
-struct cp210x_serial_private {
+struct cp210x_port_private {
__u8 bInterfaceNumber;
};
@@ -226,8 +226,8 @@ static struct usb_serial_driver cp210x_device = {
.set_termios = cp210x_set_termios,
.tiocmget = cp210x_tiocmget,
.tiocmset = cp210x_tiocmset,
- .attach = cp210x_startup,
- .release = cp210x_release,
+ .port_probe = cp210x_port_probe,
+ .port_remove = cp210x_port_remove,
.dtr_rts = cp210x_dtr_rts
};
@@ -321,7 +321,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
unsigned int *data, int size)
{
struct usb_serial *serial = port->serial;
- struct cp210x_serial_private *spriv = usb_get_serial_data(serial);
+ struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
__le32 *buf;
int result, i, length;
@@ -335,7 +335,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
/* Issue the request, attempting to read 'size' bytes */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
request, REQTYPE_INTERFACE_TO_HOST, 0x0000,
- spriv->bInterfaceNumber, buf, size,
+ port_priv->bInterfaceNumber, buf, size,
USB_CTRL_GET_TIMEOUT);
/* Convert data into an array of integers */
@@ -366,7 +366,7 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
unsigned int *data, int size)
{
struct usb_serial *serial = port->serial;
- struct cp210x_serial_private *spriv = usb_get_serial_data(serial);
+ struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
__le32 *buf;
int result, i, length;
@@ -385,13 +385,13 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
request, REQTYPE_HOST_TO_INTERFACE, 0x0000,
- spriv->bInterfaceNumber, buf, size,
+ port_priv->bInterfaceNumber, buf, size,
USB_CTRL_SET_TIMEOUT);
} else {
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0),
request, REQTYPE_HOST_TO_INTERFACE, data[0],
- spriv->bInterfaceNumber, NULL, 0,
+ port_priv->bInterfaceNumber, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
@@ -785,7 +785,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
} else {
modem_ctl[0] &= ~0x7B;
modem_ctl[0] |= 0x01;
- modem_ctl[1] |= 0x40;
+ modem_ctl[1] = 0x40;
dev_dbg(dev, "%s - flow control = NONE\n", __func__);
}
@@ -873,29 +873,32 @@ static void cp210x_break_ctl(struct tty_struct *tty, int break_state)
cp210x_set_config(port, CP210X_SET_BREAK, &state, 2);
}
-static int cp210x_startup(struct usb_serial *serial)
+static int cp210x_port_probe(struct usb_serial_port *port)
{
+ struct usb_serial *serial = port->serial;
struct usb_host_interface *cur_altsetting;
- struct cp210x_serial_private *spriv;
+ struct cp210x_port_private *port_priv;
- spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
- if (!spriv)
+ port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL);
+ if (!port_priv)
return -ENOMEM;
cur_altsetting = serial->interface->cur_altsetting;
- spriv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber;
+ port_priv->bInterfaceNumber = cur_altsetting->desc.bInterfaceNumber;
- usb_set_serial_data(serial, spriv);
+ usb_set_serial_port_data(port, port_priv);
return 0;
}
-static void cp210x_release(struct usb_serial *serial)
+static int cp210x_port_remove(struct usb_serial_port *port)
{
- struct cp210x_serial_private *spriv;
+ struct cp210x_port_private *port_priv;
+
+ port_priv = usb_get_serial_port_data(port);
+ kfree(port_priv);
- spriv = usb_get_serial_data(serial);
- kfree(spriv);
+ return 0;
}
module_usb_serial_driver(serial_drivers, id_table);
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index c0866971db2b..1947ea0e0988 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -2856,14 +2856,16 @@ static int edge_startup(struct usb_serial *serial)
/* not set up yet, so do it now */
edge_serial->interrupt_read_urb =
usb_alloc_urb(0, GFP_KERNEL);
- if (!edge_serial->interrupt_read_urb)
- return -ENOMEM;
+ if (!edge_serial->interrupt_read_urb) {
+ response = -ENOMEM;
+ break;
+ }
edge_serial->interrupt_in_buffer =
kmalloc(buffer_size, GFP_KERNEL);
if (!edge_serial->interrupt_in_buffer) {
- usb_free_urb(edge_serial->interrupt_read_urb);
- return -ENOMEM;
+ response = -ENOMEM;
+ break;
}
edge_serial->interrupt_in_endpoint =
endpoint->bEndpointAddress;
@@ -2891,14 +2893,16 @@ static int edge_startup(struct usb_serial *serial)
/* not set up yet, so do it now */
edge_serial->read_urb =
usb_alloc_urb(0, GFP_KERNEL);
- if (!edge_serial->read_urb)
- return -ENOMEM;
+ if (!edge_serial->read_urb) {
+ response = -ENOMEM;
+ break;
+ }
edge_serial->bulk_in_buffer =
kmalloc(buffer_size, GFP_KERNEL);
if (!edge_serial->bulk_in_buffer) {
- usb_free_urb(edge_serial->read_urb);
- return -ENOMEM;
+ response = -ENOMEM;
+ break;
}
edge_serial->bulk_in_endpoint =
endpoint->bEndpointAddress;
@@ -2924,9 +2928,22 @@ static int edge_startup(struct usb_serial *serial)
}
}
- if (!interrupt_in_found || !bulk_in_found || !bulk_out_found) {
- dev_err(ddev, "Error - the proper endpoints were not found!\n");
- return -ENODEV;
+ if (response || !interrupt_in_found || !bulk_in_found ||
+ !bulk_out_found) {
+ if (!response) {
+ dev_err(ddev, "expected endpoints not found\n");
+ response = -ENODEV;
+ }
+
+ usb_free_urb(edge_serial->interrupt_read_urb);
+ kfree(edge_serial->interrupt_in_buffer);
+
+ usb_free_urb(edge_serial->read_urb);
+ kfree(edge_serial->bulk_in_buffer);
+
+ kfree(edge_serial);
+
+ return response;
}
/* start interrupt read for this edgeport this interrupt will
@@ -2949,16 +2966,9 @@ static void edge_disconnect(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
- /* stop reads and writes on all ports */
- /* free up our endpoint stuff */
if (edge_serial->is_epic) {
usb_kill_urb(edge_serial->interrupt_read_urb);
- usb_free_urb(edge_serial->interrupt_read_urb);
- kfree(edge_serial->interrupt_in_buffer);
-
usb_kill_urb(edge_serial->read_urb);
- usb_free_urb(edge_serial->read_urb);
- kfree(edge_serial->bulk_in_buffer);
}
}
@@ -2971,6 +2981,16 @@ static void edge_release(struct usb_serial *serial)
{
struct edgeport_serial *edge_serial = usb_get_serial_data(serial);
+ if (edge_serial->is_epic) {
+ usb_kill_urb(edge_serial->interrupt_read_urb);
+ usb_free_urb(edge_serial->interrupt_read_urb);
+ kfree(edge_serial->interrupt_in_buffer);
+
+ usb_kill_urb(edge_serial->read_urb);
+ usb_free_urb(edge_serial->read_urb);
+ kfree(edge_serial->bulk_in_buffer);
+ }
+
kfree(edge_serial);
}
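Review note: edge_startup() now records -ENOMEM in response and breaks out of the endpoint loop instead of returning early, so every partially allocated URB and buffer is released on one common exit path, with edge_release() taking over the matching teardown for the EPiC case. A generic sketch of that record-the-error, break, clean-up-once style in plain C with hypothetical resources:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a = NULL, *b = NULL;
	int err = 0;
	int i;

	for (i = 0; i < 2; i++) {
		char **slot = i ? &b : &a;

		*slot = malloc(64);
		if (!*slot) {
			err = -1;	/* remember the failure ... */
			break;		/* ... and fall through to cleanup */
		}
	}

	if (err) {
		free(a);		/* free(NULL) is harmless */
		free(b);
		return err;
	}

	printf("both resources allocated\n");
	free(a);
	free(b);
	return 0;
}

int main(void)
{
	return setup() ? 1 : 0;
}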
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index e07b15ed5814..7faa901ee47f 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2376,6 +2376,10 @@ static void keyspan_release(struct usb_serial *serial)
s_priv = usb_get_serial_data(serial);
+ /* Make sure to unlink the URBs submitted in attach. */
+ usb_kill_urb(s_priv->instat_urb);
+ usb_kill_urb(s_priv->indat_urb);
+
usb_free_urb(s_priv->instat_urb);
usb_free_urb(s_priv->indat_urb);
usb_free_urb(s_priv->glocont_urb);
diff --git a/drivers/usb/serial/mxuport.c b/drivers/usb/serial/mxuport.c
index 460a40669967..d029b2fc0f75 100644
--- a/drivers/usb/serial/mxuport.c
+++ b/drivers/usb/serial/mxuport.c
@@ -1263,6 +1263,15 @@ static int mxuport_attach(struct usb_serial *serial)
return 0;
}
+static void mxuport_release(struct usb_serial *serial)
+{
+ struct usb_serial_port *port0 = serial->port[0];
+ struct usb_serial_port *port1 = serial->port[1];
+
+ usb_serial_generic_close(port1);
+ usb_serial_generic_close(port0);
+}
+
static int mxuport_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct mxuport_port *mxport = usb_get_serial_port_data(port);
@@ -1365,6 +1374,7 @@ static struct usb_serial_driver mxuport_device = {
.probe = mxuport_probe,
.port_probe = mxuport_port_probe,
.attach = mxuport_attach,
+ .release = mxuport_release,
.calc_num_ports = mxuport_calc_num_ports,
.open = mxuport_open,
.close = mxuport_close,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2a593d9232d7..d67b687f20db 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -376,18 +376,22 @@ static void option_instat_callback(struct urb *urb);
#define HAIER_PRODUCT_CE81B 0x10f8
#define HAIER_PRODUCT_CE100 0x2009
-/* Cinterion (formerly Siemens) products */
-#define SIEMENS_VENDOR_ID 0x0681
-#define CINTERION_VENDOR_ID 0x1e2d
+/* Gemalto's Cinterion products (formerly Siemens) */
+#define SIEMENS_VENDOR_ID 0x0681
+#define CINTERION_VENDOR_ID 0x1e2d
+#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
#define CINTERION_PRODUCT_HC25_MDM 0x0047
-#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
+#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
#define CINTERION_PRODUCT_HC28_MDM 0x004C
-#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
#define CINTERION_PRODUCT_EU3_E 0x0051
#define CINTERION_PRODUCT_EU3_P 0x0052
#define CINTERION_PRODUCT_PH8 0x0053
#define CINTERION_PRODUCT_AHXX 0x0055
#define CINTERION_PRODUCT_PLXX 0x0060
+#define CINTERION_PRODUCT_PH8_2RMNET 0x0082
+#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
+#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
+#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -642,6 +646,10 @@ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
.reserved = BIT(1) | BIT(2) | BIT(3),
};
+static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
+ .reserved = BIT(4) | BIT(5),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1614,7 +1622,79 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
@@ -1625,6 +1705,61 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) },
@@ -1721,7 +1856,13 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
+ .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index 504f5bff79c0..b18974cbd995 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -141,6 +141,7 @@ static void qt2_release(struct usb_serial *serial)
serial_priv = usb_get_serial_data(serial);
+ usb_kill_urb(serial_priv->read_urb);
usb_free_urb(serial_priv->read_urb);
kfree(serial_priv->read_buffer);
kfree(serial_priv);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 511aab3b9206..4bf7a34f6a4c 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -486,7 +486,8 @@ static void eoi_pirq(struct irq_data *data)
if (!VALID_EVTCHN(evtchn))
return;
- if (unlikely(irqd_is_setaffinity_pending(data))) {
+ if (unlikely(irqd_is_setaffinity_pending(data)) &&
+ likely(!irqd_irq_disabled(data))) {
int masked = test_and_set_mask(evtchn);
clear_evtchn(evtchn);
@@ -1373,7 +1374,8 @@ static void ack_dynirq(struct irq_data *data)
if (!VALID_EVTCHN(evtchn))
return;
- if (unlikely(irqd_is_setaffinity_pending(data))) {
+ if (unlikely(irqd_is_setaffinity_pending(data)) &&
+ likely(!irqd_irq_disabled(data))) {
int masked = test_and_set_mask(evtchn);
clear_evtchn(evtchn);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index ba5aec76e6f8..42d11e7cf7fe 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3866,6 +3866,7 @@ extern const struct dentry_operations btrfs_dentry_operations;
/* ioctl.c */
long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
void btrfs_update_iflags(struct inode *inode);
void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
int btrfs_is_empty_uuid(u8 *uuid);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e557e4ca0392..2ad4cb3da8f6 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2797,7 +2797,7 @@ const struct file_operations btrfs_file_operations = {
.fallocate = btrfs_fallocate,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = btrfs_ioctl,
+ .compat_ioctl = btrfs_compat_ioctl,
#endif
};
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c8d287fff7bc..ef677cdf777f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9513,7 +9513,7 @@ static const struct file_operations btrfs_dir_file_operations = {
.iterate = btrfs_real_readdir,
.unlocked_ioctl = btrfs_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = btrfs_ioctl,
+ .compat_ioctl = btrfs_compat_ioctl,
#endif
.release = btrfs_release_file,
.fsync = btrfs_sync_file,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 31c9f6471ce7..1c1ee12ab0cf 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -5495,3 +5495,24 @@ long btrfs_ioctl(struct file *file, unsigned int
return -ENOTTY;
}
+
+#ifdef CONFIG_COMPAT
+long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ switch (cmd) {
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
+ break;
+ case FS_IOC32_SETFLAGS:
+ cmd = FS_IOC_SETFLAGS;
+ break;
+ case FS_IOC32_GETVERSION:
+ cmd = FS_IOC_GETVERSION;
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+
+ return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
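Review note: btrfs now routes 32-bit ioctls through btrfs_compat_ioctl(), which maps the FS_IOC32_* command numbers onto their native equivalents and rejects everything else with -ENOIOCTLCMD, rather than aliasing .compat_ioctl to btrfs_ioctl directly. A user-space sketch of the same translate-then-dispatch shape; the command values below are invented, not the real FS_IOC constants:

#include <stdio.h>

#define ENOIOCTLCMD	515	/* stand-in value for the sketch */

enum {				/* hypothetical 32-bit and native commands */
	IOC32_GETFLAGS = 1,
	IOC32_SETFLAGS = 2,
	IOC_GETFLAGS = 101,
	IOC_SETFLAGS = 102,
};

static long native_ioctl(unsigned int cmd)
{
	printf("native handler got cmd %u\n", cmd);
	return 0;
}

static long compat_ioctl(unsigned int cmd)
{
	switch (cmd) {
	case IOC32_GETFLAGS:
		cmd = IOC_GETFLAGS;
		break;
	case IOC32_SETFLAGS:
		cmd = IOC_SETFLAGS;
		break;
	default:
		return -ENOIOCTLCMD;	/* unknown 32-bit command */
	}
	return native_ioctl(cmd);
}

int main(void)
{
	compat_ioctl(IOC32_GETFLAGS);		/* translated and dispatched */
	if (compat_ioctl(42) == -ENOIOCTLCMD)
		printf("42 rejected as expected\n");
	return 0;
}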
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index f4cf200b3c76..79450fa66d16 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -24,10 +24,13 @@
#include <linux/string.h>
#include <keys/user-type.h>
#include <linux/key-type.h>
+#include <linux/keyctl.h>
#include <linux/inet.h>
#include "cifsglob.h"
#include "cifs_spnego.h"
#include "cifs_debug.h"
+#include "cifsproto.h"
+static const struct cred *spnego_cred;
/* create a new cifs key */
static int
@@ -102,6 +105,7 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
size_t desc_len;
struct key *spnego_key;
const char *hostname = server->hostname;
+ const struct cred *saved_cred;
/* length of fields (with semicolons): ver=0xyz ip4=ipaddress
host=hostname sec=mechanism uid=0xFF user=username */
@@ -163,7 +167,9 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
sprintf(dp, ";pid=0x%x", current->pid);
cifs_dbg(FYI, "key description = %s\n", description);
+ saved_cred = override_creds(spnego_cred);
spnego_key = request_key(&cifs_spnego_key_type, description, "");
+ revert_creds(saved_cred);
#ifdef CONFIG_CIFS_DEBUG2
if (cifsFYI && !IS_ERR(spnego_key)) {
@@ -177,3 +183,64 @@ out:
kfree(description);
return spnego_key;
}
+
+int
+init_cifs_spnego(void)
+{
+ struct cred *cred;
+ struct key *keyring;
+ int ret;
+
+ cifs_dbg(FYI, "Registering the %s key type\n",
+ cifs_spnego_key_type.name);
+
+ /*
+ * Create an override credential set with special thread keyring for
+ * spnego upcalls.
+ */
+
+ cred = prepare_kernel_cred(NULL);
+ if (!cred)
+ return -ENOMEM;
+
+ keyring = keyring_alloc(".cifs_spnego",
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ,
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ if (IS_ERR(keyring)) {
+ ret = PTR_ERR(keyring);
+ goto failed_put_cred;
+ }
+
+ ret = register_key_type(&cifs_spnego_key_type);
+ if (ret < 0)
+ goto failed_put_key;
+
+ /*
+ * instruct request_key() to use this special keyring as a cache for
+ * the results it looks up
+ */
+ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
+ cred->thread_keyring = keyring;
+ cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
+ spnego_cred = cred;
+
+ cifs_dbg(FYI, "cifs spnego keyring: %d\n", key_serial(keyring));
+ return 0;
+
+failed_put_key:
+ key_put(keyring);
+failed_put_cred:
+ put_cred(cred);
+ return ret;
+}
+
+void
+exit_cifs_spnego(void)
+{
+ key_revoke(spnego_cred->thread_keyring);
+ unregister_key_type(&cifs_spnego_key_type);
+ put_cred(spnego_cred);
+ cifs_dbg(FYI, "Unregistered %s key type\n", cifs_spnego_key_type.name);
+}
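Review note: the spnego change wraps request_key() in override_creds()/revert_creds() with a credential set built once in init_cifs_spnego(), whose thread keyring (".cifs_spnego") caches the upcall results; exit_cifs_spnego() revokes the keyring and drops the cred. A generic sketch of that save, override, work, restore pattern in plain C; the functions below are local stand-ins, not the kernel credential API:

#include <stdio.h>

struct cred { const char *keyring; };

static const struct cred default_cred = { .keyring = "session" };
static const struct cred spnego_cred  = { .keyring = ".cifs_spnego" };
static const struct cred *current_cred = &default_cred;

static const struct cred *override_creds(const struct cred *new_cred)
{
	const struct cred *old = current_cred;

	current_cred = new_cred;
	return old;
}

static void revert_creds(const struct cred *old)
{
	current_cred = old;
}

static void request_key(const char *desc)
{
	printf("request_key(%s) cached in keyring %s\n",
	       desc, current_cred->keyring);
}

int main(void)
{
	const struct cred *saved = override_creds(&spnego_cred);

	request_key("cifs.spnego;host=example");
	revert_creds(saved);

	printf("back to keyring %s\n", current_cred->keyring);
	return 0;
}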
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 9d7996e8e793..3e924abdd969 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1249,7 +1249,7 @@ init_cifs(void)
goto out_destroy_mids;
#ifdef CONFIG_CIFS_UPCALL
- rc = register_key_type(&cifs_spnego_key_type);
+ rc = init_cifs_spnego();
if (rc)
goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_UPCALL */
@@ -1272,7 +1272,7 @@ out_init_cifs_idmap:
out_register_key_type:
#endif
#ifdef CONFIG_CIFS_UPCALL
- unregister_key_type(&cifs_spnego_key_type);
+ exit_cifs_spnego();
out_destroy_request_bufs:
#endif
cifs_destroy_request_bufs();
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index c31ce98c1704..5b868060eab8 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -60,6 +60,8 @@ do { \
} while (0)
extern int init_cifs_idmap(void);
extern void exit_cifs_idmap(void);
+extern int init_cifs_spnego(void);
+extern void exit_cifs_spnego(void);
extern char *build_path_from_dentry(struct dentry *);
extern char *cifs_build_path_to_root(struct smb_vol *vol,
struct cifs_sb_info *cifs_sb,
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 57db63ff88da..fe423e18450f 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -400,19 +400,27 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
sec_blob->LmChallengeResponse.MaximumLength = 0;
sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
- rc = setup_ntlmv2_rsp(ses, nls_cp);
- if (rc) {
- cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
- goto setup_ntlmv2_ret;
+ if (ses->user_name != NULL) {
+ rc = setup_ntlmv2_rsp(ses, nls_cp);
+ if (rc) {
+ cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
+ goto setup_ntlmv2_ret;
+ }
+ memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+ ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+
+ sec_blob->NtChallengeResponse.Length =
+ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ sec_blob->NtChallengeResponse.MaximumLength =
+ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ } else {
+ /*
+ * don't send an NT Response for anonymous access
+ */
+ sec_blob->NtChallengeResponse.Length = 0;
+ sec_blob->NtChallengeResponse.MaximumLength = 0;
}
- memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
- ses->auth_key.len - CIFS_SESS_KEY_SIZE);
- tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
-
- sec_blob->NtChallengeResponse.Length =
- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
- sec_blob->NtChallengeResponse.MaximumLength =
- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
if (ses->domainName == NULL) {
sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
@@ -670,20 +678,24 @@ sess_auth_lanman(struct sess_data *sess_data)
pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
- /* no capabilities flags in old lanman negotiation */
- pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
-
- /* Calculate hash with password and copy into bcc_ptr.
- * Encryption Key (stored as in cryptkey) gets used if the
- * security mode bit in Negottiate Protocol response states
- * to use challenge/response method (i.e. Password bit is 1).
- */
- rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
- ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
- true : false, lnm_session_key);
-
- memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
- bcc_ptr += CIFS_AUTH_RESP_SIZE;
+ if (ses->user_name != NULL) {
+ /* no capabilities flags in old lanman negotiation */
+ pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+
+ /* Calculate hash with password and copy into bcc_ptr.
+ * Encryption Key (stored as in cryptkey) gets used if the
+ * security mode bit in Negottiate Protocol response states
+ * to use challenge/response method (i.e. Password bit is 1).
+ */
+ rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
+ ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
+ true : false, lnm_session_key);
+
+ memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
+ bcc_ptr += CIFS_AUTH_RESP_SIZE;
+ } else {
+ pSMB->old_req.PasswordLength = 0;
+ }
/*
* can not sign if LANMAN negotiated so no need
@@ -769,26 +781,31 @@ sess_auth_ntlm(struct sess_data *sess_data)
capabilities = cifs_ssetup_hdr(ses, pSMB);
pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
- pSMB->req_no_secext.CaseInsensitivePasswordLength =
- cpu_to_le16(CIFS_AUTH_RESP_SIZE);
- pSMB->req_no_secext.CaseSensitivePasswordLength =
- cpu_to_le16(CIFS_AUTH_RESP_SIZE);
-
- /* calculate ntlm response and session key */
- rc = setup_ntlm_response(ses, sess_data->nls_cp);
- if (rc) {
- cifs_dbg(VFS, "Error %d during NTLM authentication\n",
- rc);
- goto out;
- }
+ if (ses->user_name != NULL) {
+ pSMB->req_no_secext.CaseInsensitivePasswordLength =
+ cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+ pSMB->req_no_secext.CaseSensitivePasswordLength =
+ cpu_to_le16(CIFS_AUTH_RESP_SIZE);
+
+ /* calculate ntlm response and session key */
+ rc = setup_ntlm_response(ses, sess_data->nls_cp);
+ if (rc) {
+ cifs_dbg(VFS, "Error %d during NTLM authentication\n",
+ rc);
+ goto out;
+ }
- /* copy ntlm response */
- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
- CIFS_AUTH_RESP_SIZE);
- bcc_ptr += CIFS_AUTH_RESP_SIZE;
- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
- CIFS_AUTH_RESP_SIZE);
- bcc_ptr += CIFS_AUTH_RESP_SIZE;
+ /* copy ntlm response */
+ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+ CIFS_AUTH_RESP_SIZE);
+ bcc_ptr += CIFS_AUTH_RESP_SIZE;
+ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+ CIFS_AUTH_RESP_SIZE);
+ bcc_ptr += CIFS_AUTH_RESP_SIZE;
+ } else {
+ pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
+ pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
+ }
if (ses->capabilities & CAP_UNICODE) {
/* unicode strings must be word aligned */
@@ -878,22 +895,26 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
/* LM2 password would be here if we supported it */
pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
- /* calculate nlmv2 response and session key */
- rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
- if (rc) {
- cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
- goto out;
- }
+ if (ses->user_name != NULL) {
+ /* calculate ntlmv2 response and session key */
+ rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp);
+ if (rc) {
+ cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc);
+ goto out;
+ }
- memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
- ses->auth_key.len - CIFS_SESS_KEY_SIZE);
- bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
+ memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
+ ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
- /* set case sensitive password length after tilen may get
- * assigned, tilen is 0 otherwise.
- */
- pSMB->req_no_secext.CaseSensitivePasswordLength =
- cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ /* set case sensitive password length after tilen may get
+ * assigned, tilen is 0 otherwise.
+ */
+ pSMB->req_no_secext.CaseSensitivePasswordLength =
+ cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE);
+ } else {
+ pSMB->req_no_secext.CaseSensitivePasswordLength = 0;
+ }
if (ses->capabilities & CAP_UNICODE) {
if (sess_data->iov[0].iov_len % 2) {
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index bc0bb9c34f72..0ffa18094335 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -44,6 +44,7 @@
#define SMB2_OP_DELETE 7
#define SMB2_OP_HARDLINK 8
#define SMB2_OP_SET_EOF 9
+#define SMB2_OP_RMDIR 10
/* Used when constructing chained read requests. */
#define CHAINED_REQUEST 1
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 899bbc86f73e..4f0231e685a9 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -80,6 +80,10 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
* SMB2_open() call.
*/
break;
+ case SMB2_OP_RMDIR:
+ tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
+ fid.volatile_fid);
+ break;
case SMB2_OP_RENAME:
tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
fid.volatile_fid, (__le16 *)data);
@@ -191,8 +195,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
- CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE,
- NULL, SMB2_OP_DELETE);
+ CREATE_NOT_FILE,
+ NULL, SMB2_OP_RMDIR);
}
int
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 3398ac7e7b37..e8d1f8c59b56 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2336,6 +2336,22 @@ SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
}
int
+SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid)
+{
+ __u8 delete_pending = 1;
+ void *data;
+ unsigned int size;
+
+ data = &delete_pending;
+ size = 1; /* sizeof __u8 */
+
+ return send_set_info(xid, tcon, persistent_fid, volatile_fid,
+ current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data,
+ &size);
+}
+
+int
SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 79dc650c18b2..9bc59f9c12fb 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -140,6 +140,8 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
__le16 *target_file);
+extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid);
extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
__le16 *target_file);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index ac644c31ca67..9f230e589ecc 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1090,22 +1090,20 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
ext4_group_t block_group;
int bit;
- struct buffer_head *bitmap_bh;
+ struct buffer_head *bitmap_bh = NULL;
struct inode *inode = NULL;
- long err = -EIO;
+ int err = -EIO;
- /* Error cases - e2fsck has already cleaned up for us */
- if (ino > max_ino) {
- ext4_warning(sb, "bad orphan ino %lu! e2fsck was run?", ino);
- goto error;
- }
+ if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
+ goto bad_orphan;
block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
if (!bitmap_bh) {
- ext4_warning(sb, "inode bitmap error for orphan %lu", ino);
- goto error;
+ ext4_error(sb, "inode bitmap error %ld for orphan %lu",
+ ino, PTR_ERR(bitmap_bh));
+ return (struct inode *) bitmap_bh;
}
/* Having the inode bit set should be a 100% indicator that this
@@ -1116,15 +1114,21 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
goto bad_orphan;
inode = ext4_iget(sb, ino);
- if (IS_ERR(inode))
- goto iget_failed;
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
+ ino, err);
+ return inode;
+ }
/*
- * If the orphans has i_nlinks > 0 then it should be able to be
- * truncated, otherwise it won't be removed from the orphan list
- * during processing and an infinite loop will result.
+	 * If the orphan has i_nlink > 0 then it should be able to
+ * be truncated, otherwise it won't be removed from the orphan
+ * list during processing and an infinite loop will result.
+ * Similarly, it must not be a bad inode.
*/
- if (inode->i_nlink && !ext4_can_truncate(inode))
+ if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
+ is_bad_inode(inode))
goto bad_orphan;
if (NEXT_ORPHAN(inode) > max_ino)
@@ -1132,29 +1136,25 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
brelse(bitmap_bh);
return inode;
-iget_failed:
- err = PTR_ERR(inode);
- inode = NULL;
bad_orphan:
- ext4_warning(sb, "bad orphan inode %lu! e2fsck was run?", ino);
- printk(KERN_WARNING "ext4_test_bit(bit=%d, block=%llu) = %d\n",
- bit, (unsigned long long)bitmap_bh->b_blocknr,
- ext4_test_bit(bit, bitmap_bh->b_data));
- printk(KERN_WARNING "inode=%p\n", inode);
+ ext4_error(sb, "bad orphan inode %lu", ino);
+ if (bitmap_bh)
+ printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
+ bit, (unsigned long long)bitmap_bh->b_blocknr,
+ ext4_test_bit(bit, bitmap_bh->b_data));
if (inode) {
- printk(KERN_WARNING "is_bad_inode(inode)=%d\n",
+ printk(KERN_ERR "is_bad_inode(inode)=%d\n",
is_bad_inode(inode));
- printk(KERN_WARNING "NEXT_ORPHAN(inode)=%u\n",
+ printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
NEXT_ORPHAN(inode));
- printk(KERN_WARNING "max_ino=%lu\n", max_ino);
- printk(KERN_WARNING "i_nlink=%u\n", inode->i_nlink);
+ printk(KERN_ERR "max_ino=%lu\n", max_ino);
+ printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink);
/* Avoid freeing blocks if we got a bad deleted inode */
if (inode->i_nlink == 0)
inode->i_blocks = 0;
iput(inode);
}
brelse(bitmap_bh);
-error:
return ERR_PTR(err);
}
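
The reworked ext4_orphan_get() error handling reports problems with ext4_error() and propagates errno values to the caller encoded in the returned pointer. A minimal userspace sketch of the ERR_PTR/IS_ERR/PTR_ERR convention the callers rely on; the helpers below are simplified re-implementations in the spirit of include/linux/err.h, not copies of it:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)    { return (void *)error; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static void *lookup(int fail)
    {
        if (fail)
            return ERR_PTR(-EIO);   /* error encoded in the pointer itself */
        return "inode";
    }

    int main(void)
    {
        void *p = lookup(1);

        if (IS_ERR(p))
            printf("lookup failed: err=%ld\n", PTR_ERR(p));
        return 0;
    }
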
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 99c8e38ffb7b..dee06cd428eb 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1248,6 +1248,7 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
int order = 1;
+ int bb_incr = 1 << (e4b->bd_blkbits - 1);
void *bb;
BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
@@ -1260,7 +1261,8 @@ static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
/* this block is part of buddy of order 'order' */
return order;
}
- bb += 1 << (e4b->bd_blkbits - order);
+ bb += bb_incr;
+ bb_incr >>= 1;
order++;
}
return 0;
@@ -2553,7 +2555,7 @@ int ext4_mb_init(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
unsigned i, j;
- unsigned offset;
+ unsigned offset, offset_incr;
unsigned max;
int ret;
@@ -2582,11 +2584,13 @@ int ext4_mb_init(struct super_block *sb)
i = 1;
offset = 0;
+ offset_incr = 1 << (sb->s_blocksize_bits - 1);
max = sb->s_blocksize << 2;
do {
sbi->s_mb_offsets[i] = offset;
sbi->s_mb_maxs[i] = max;
- offset += 1 << (sb->s_blocksize_bits - i);
+ offset += offset_incr;
+ offset_incr = offset_incr >> 1;
max = max >> 1;
i++;
} while (i <= sb->s_blocksize_bits + 1);
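
Both mballoc changes replace a per-iteration shift of the form 1 << (blkbits - i) with an increment that starts at 1 << (blkbits - 1) and is halved each pass. The sequences are identical, but the old expression eventually asks for a shift by a negative count once the loop index passes blkbits, which is undefined behaviour in C (the kind of thing UBSAN flags). A standalone model of the new ext4_mb_init() loop, assuming a 4K block size:

    /*
     * Illustrative sketch, not ext4 code: mirrors the new ext4_mb_init()
     * increment scheme.  Halving offset_incr gives the same offsets as the
     * old "1 << (bits - i)" without ever shifting by a negative count.
     */
    #include <stdio.h>

    int main(void)
    {
        const unsigned bits = 12;   /* e.g. 4K block size */
        unsigned offset = 0;
        unsigned offset_incr = 1u << (bits - 1);
        unsigned i = 1;

        do {
            printf("i=%2u offset=%u\n", i, offset);
            offset += offset_incr;
            offset_incr >>= 1;      /* halve instead of re-shifting */
            i++;
        } while (i <= bits + 1);

        return 0;
    }
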
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 447486425b8c..d3a22a11ac76 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2598,7 +2598,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
* list entries can cause panics at unmount time.
*/
mutex_lock(&sbi->s_orphan_lock);
- list_del(&EXT4_I(inode)->i_orphan);
+ list_del_init(&EXT4_I(inode)->i_orphan);
mutex_unlock(&sbi->s_orphan_lock);
}
}
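
The one-line ext4_orphan_add() change uses list_del_init() on the error path so that i_orphan is left as a valid empty list rather than with poisoned pointers, and a later list_empty() test on it still behaves. A standalone sketch with simplified list helpers (the kernel's own live in include/linux/list.h):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_del_init(struct list_head *e)
    {
        e->next->prev = e->prev;
        e->prev->next = e->next;
        INIT_LIST_HEAD(e);          /* plain list_del() would poison these instead */
    }

    static int list_empty(const struct list_head *h) { return h->next == h; }

    int main(void)
    {
        struct list_head head, node;

        INIT_LIST_HEAD(&head);
        node.next = head.next; node.prev = &head;       /* open-coded       */
        head.next->prev = &node; head.next = &node;     /* list_add()       */

        list_del_init(&node);
        printf("node empty after removal: %d\n", list_empty(&node));   /* 1 */
        return 0;
    }
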
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 69aa378e60d9..af33fb77196f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -447,58 +447,91 @@ struct mem_size_stats {
u64 pss;
};
+static void smaps_account(struct mem_size_stats *mss, struct page *page,
+ unsigned long size, bool young, bool dirty)
+{
+ int mapcount;
+
+ if (PageAnon(page))
+ mss->anonymous += size;
-static void smaps_pte_entry(pte_t ptent, unsigned long addr,
- unsigned long ptent_size, struct mm_walk *walk)
+ mss->resident += size;
+ /* Accumulate the size in pages that have been accessed. */
+ if (young || PageReferenced(page))
+ mss->referenced += size;
+ mapcount = page_mapcount(page);
+ if (mapcount >= 2) {
+ u64 pss_delta;
+
+ if (dirty || PageDirty(page))
+ mss->shared_dirty += size;
+ else
+ mss->shared_clean += size;
+ pss_delta = (u64)size << PSS_SHIFT;
+ do_div(pss_delta, mapcount);
+ mss->pss += pss_delta;
+ } else {
+ if (dirty || PageDirty(page))
+ mss->private_dirty += size;
+ else
+ mss->private_clean += size;
+ mss->pss += (u64)size << PSS_SHIFT;
+ }
+}
+
+static void smaps_pte_entry(pte_t *pte, unsigned long addr,
+ struct mm_walk *walk)
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = mss->vma;
pgoff_t pgoff = linear_page_index(vma, addr);
struct page *page = NULL;
- int mapcount;
- if (pte_present(ptent)) {
- page = vm_normal_page(vma, addr, ptent);
- } else if (is_swap_pte(ptent)) {
- swp_entry_t swpent = pte_to_swp_entry(ptent);
+ if (pte_present(*pte)) {
+ page = vm_normal_page(vma, addr, *pte);
+ } else if (is_swap_pte(*pte)) {
+ swp_entry_t swpent = pte_to_swp_entry(*pte);
if (!non_swap_entry(swpent))
- mss->swap += ptent_size;
+ mss->swap += PAGE_SIZE;
else if (is_migration_entry(swpent))
page = migration_entry_to_page(swpent);
- } else if (pte_file(ptent)) {
- if (pte_to_pgoff(ptent) != pgoff)
- mss->nonlinear += ptent_size;
+ } else if (pte_file(*pte)) {
+ if (pte_to_pgoff(*pte) != pgoff)
+ mss->nonlinear += PAGE_SIZE;
}
if (!page)
return;
- if (PageAnon(page))
- mss->anonymous += ptent_size;
-
if (page->index != pgoff)
- mss->nonlinear += ptent_size;
+ mss->nonlinear += PAGE_SIZE;
- mss->resident += ptent_size;
- /* Accumulate the size in pages that have been accessed. */
- if (pte_young(ptent) || PageReferenced(page))
- mss->referenced += ptent_size;
- mapcount = page_mapcount(page);
- if (mapcount >= 2) {
- if (pte_dirty(ptent) || PageDirty(page))
- mss->shared_dirty += ptent_size;
- else
- mss->shared_clean += ptent_size;
- mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
- } else {
- if (pte_dirty(ptent) || PageDirty(page))
- mss->private_dirty += ptent_size;
- else
- mss->private_clean += ptent_size;
- mss->pss += (ptent_size << PSS_SHIFT);
- }
+ smaps_account(mss, page, PAGE_SIZE, pte_young(*pte), pte_dirty(*pte));
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+ struct mm_walk *walk)
+{
+ struct mem_size_stats *mss = walk->private;
+ struct vm_area_struct *vma = mss->vma;
+ struct page *page;
+
+ /* FOLL_DUMP will return -EFAULT on huge zero page */
+ page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
+ if (IS_ERR_OR_NULL(page))
+ return;
+ mss->anonymous_thp += HPAGE_PMD_SIZE;
+ smaps_account(mss, page, HPAGE_PMD_SIZE,
+ pmd_young(*pmd), pmd_dirty(*pmd));
}
+#else
+static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
+ struct mm_walk *walk)
+{
+}
+#endif
static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct mm_walk *walk)
@@ -509,9 +542,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
spinlock_t *ptl;
if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
- smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
+ smaps_pmd_entry(pmd, addr, walk);
spin_unlock(ptl);
- mss->anonymous_thp += HPAGE_PMD_SIZE;
return 0;
}
@@ -524,7 +556,7 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
*/
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
- smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
+ smaps_pte_entry(pte, addr, walk);
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
return 0;
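
The smaps rework above pulls the shared/private and PSS bookkeeping into smaps_account() so the new smaps_pmd_entry() can account transparent huge pages with exactly the same rules as ordinary PTEs, each mapping being charged size/mapcount in PSS_SHIFT-bit fixed point. A standalone sketch of that arithmetic with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    #define PSS_SHIFT 12
    #define PAGE_SIZE 4096UL

    static void account(uint64_t *pss, unsigned long size, int mapcount)
    {
        if (mapcount >= 2)
            *pss += ((uint64_t)size << PSS_SHIFT) / mapcount;
        else
            *pss += (uint64_t)size << PSS_SHIFT;
    }

    int main(void)
    {
        uint64_t pss = 0;

        account(&pss, PAGE_SIZE, 1);    /* private page: full 4 KiB          */
        account(&pss, PAGE_SIZE, 4);    /* shared by 4:  charged 1 KiB here  */

        printf("Pss: %llu kB\n",
               (unsigned long long)(pss >> PSS_SHIFT) / 1024);   /* 5 kB */
        return 0;
    }
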
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 32e95c76cbd0..44e5598744b1 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2969,13 +2969,14 @@ xfs_iflush_cluster(
* We need to check under the i_flags_lock for a valid inode
* here. Skip it if it is not valid or the wrong inode.
*/
- spin_lock(&ip->i_flags_lock);
- if (!ip->i_ino ||
+ spin_lock(&iq->i_flags_lock);
+ if (!iq->i_ino ||
+ __xfs_iflags_test(iq, XFS_ISTALE) ||
(XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
- spin_unlock(&ip->i_flags_lock);
+ spin_unlock(&iq->i_flags_lock);
continue;
}
- spin_unlock(&ip->i_flags_lock);
+ spin_unlock(&iq->i_flags_lock);
/*
* Do an un-protected check to see if the inode is dirty and
@@ -3091,7 +3092,7 @@ xfs_iflush(
struct xfs_buf **bpp)
{
struct xfs_mount *mp = ip->i_mount;
- struct xfs_buf *bp;
+ struct xfs_buf *bp = NULL;
struct xfs_dinode *dip;
int error;
@@ -3133,14 +3134,22 @@ xfs_iflush(
}
/*
- * Get the buffer containing the on-disk inode.
+ * Get the buffer containing the on-disk inode. We are doing a try-lock
+ * operation here, so we may get an EAGAIN error. In that case, we
+ * simply want to return with the inode still dirty.
+ *
+ * If we get any other error, we effectively have a corruption situation
+ * and we cannot flush the inode, so we treat it the same as failing
+ * xfs_iflush_int().
*/
error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
0);
- if (error || !bp) {
+ if (error == -EAGAIN) {
xfs_ifunlock(ip);
return error;
}
+ if (error)
+ goto corrupt_out;
/*
* First flush out the inode that xfs_iflush was called with.
@@ -3168,7 +3177,8 @@ xfs_iflush(
return 0;
corrupt_out:
- xfs_buf_relse(bp);
+ if (bp)
+ xfs_buf_relse(bp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
error = -EFSCORRUPTED;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 9f622feda6a4..73b6c8fca308 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1248,6 +1248,22 @@ xfs_fs_remount(
/* ro -> rw */
if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
+ if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
+ xfs_warn(mp,
+ "ro->rw transition prohibited on norecovery mount");
+ return -EINVAL;
+ }
+
+ if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
+ xfs_sb_has_ro_compat_feature(sbp,
+ XFS_SB_FEAT_RO_COMPAT_UNKNOWN)) {
+ xfs_warn(mp,
+"ro->rw transition prohibited on unknown (0x%x) ro-compat filesystem",
+ (sbp->sb_features_ro_compat &
+ XFS_SB_FEAT_RO_COMPAT_UNKNOWN));
+ return -EINVAL;
+ }
+
mp->m_flags &= ~XFS_MOUNT_RDONLY;
/*
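
The added remount checks refuse a ro->rw transition on a norecovery mount and on a v5 filesystem carrying ro-compat feature bits this kernel does not recognize, since writing to it could corrupt metadata the kernel cannot maintain. A standalone sketch of the unknown-feature test; the mask value is invented for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define RO_COMPAT_KNOWN_MASK 0x0007u   /* bits this kernel understands (made up) */

    static int can_go_rw(uint32_t sb_features_ro_compat)
    {
        uint32_t unknown = sb_features_ro_compat & ~RO_COMPAT_KNOWN_MASK;

        if (unknown) {
            printf("unknown ro-compat features 0x%x, staying read-only\n", unknown);
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        printf("known-only bits set: rw allowed = %d\n", can_go_rw(0x0004));
        printf("unknown bit present: rw allowed = %d\n", can_go_rw(0x0010));
        return 0;
    }
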
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index b37ea95bc348..71e37afd7290 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -39,8 +39,11 @@ struct can_priv {
struct can_clock clock;
enum can_state state;
- u32 ctrlmode;
- u32 ctrlmode_supported;
+
+ /* CAN controller features - see include/uapi/linux/can/netlink.h */
+ u32 ctrlmode; /* current options setting */
+ u32 ctrlmode_supported; /* options that can be modified by netlink */
+ u32 ctrlmode_static; /* static enabled options for driver/hardware */
int restart_ms;
struct timer_list restart_timer;
@@ -105,6 +108,21 @@ static inline bool can_is_canfd_skb(const struct sk_buff *skb)
return skb->len == CANFD_MTU;
}
+/* helper to define static CAN controller features at device creation time */
+static inline void can_set_static_ctrlmode(struct net_device *dev,
+ u32 static_mode)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* alloc_candev() succeeded => netdev_priv() is valid at this point */
+ priv->ctrlmode = static_mode;
+ priv->ctrlmode_static = static_mode;
+
+ /* override MTU which was set by default in can_setup()? */
+ if (static_mode & CAN_CTRLMODE_FD)
+ dev->mtu = CANFD_MTU;
+}
+
/* get data length from can_dlc with sanitized can_dlc */
u8 can_dlc2len(u8 can_dlc);
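
can_set_static_ctrlmode() gives drivers a way to declare options that the hardware always has enabled: they stay set in ctrlmode but are kept out of the set that netlink may modify (ctrlmode_supported). A hedged usage sketch follows; only can_set_static_ctrlmode(), alloc_candev(), register_candev() and CAN_CTRLMODE_FD_NON_ISO are existing kernel symbols, the probe function itself is invented:

    #include <linux/can/dev.h>

    /* Hypothetical probe fragment, not taken from any driver in this tree. */
    static int example_candev_probe(void)
    {
        struct net_device *dev;

        dev = alloc_candev(sizeof(struct can_priv), 1);
        if (!dev)
            return -ENOMEM;

        /*
         * Hardware that only runs CAN FD in non-ISO mode: mark the option as
         * static so it stays enabled instead of being a switchable
         * ctrlmode_supported bit.
         */
        can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);

        return register_candev(dev);
    }
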
diff --git a/include/linux/usb.h b/include/linux/usb.h
index bdbd19fb1ff8..0f963c15d0cd 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -367,14 +367,13 @@ struct usb_bus {
int devnum_next; /* Next open device number in
* round-robin allocation */
+ struct mutex devnum_next_mutex; /* devnum_next mutex */
struct usb_devmap devmap; /* device address allocation map */
struct usb_device *root_hub; /* Root hub */
struct usb_bus *hs_companion; /* Companion EHCI bus, if any */
struct list_head bus_list; /* list of busses */
- struct mutex usb_address0_mutex; /* unaddressed device mutex */
-
int bandwidth_allocated; /* on this bus: how much of the time
* reserved for periodic (intr/iso)
* requests is used, on average?
@@ -1060,7 +1059,7 @@ struct usbdrv_wrap {
* for interfaces bound to this driver.
* @soft_unbind: if set to 1, the USB core will not kill URBs and disable
* endpoints before calling the driver's disconnect method.
- * @disable_hub_initiated_lpm: if set to 0, the USB core will not allow hubs
+ * @disable_hub_initiated_lpm: if set to 1, the USB core will not allow hubs
* to initiate lower power link state transitions when an idle timeout
* occurs. Device-initiated USB 3.0 link PM will still be allowed.
*
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 2f48e1756cbd..a4ef2e7c243a 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -169,6 +169,7 @@ struct usb_hcd {
* bandwidth_mutex should be dropped after a successful control message
* to the device, or resetting the bandwidth after a failed attempt.
*/
+ struct mutex *address0_mutex;
struct mutex *bandwidth_mutex;
struct usb_hcd *shared_hcd;
struct usb_hcd *primary_hcd;
diff --git a/kernel/exit.c b/kernel/exit.c
index 2116aace6c85..654021d9f1b3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -930,17 +930,28 @@ static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
task_pid_type(p, wo->wo_type) == wo->wo_pid;
}
-static int eligible_child(struct wait_opts *wo, struct task_struct *p)
+static int
+eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
if (!eligible_pid(wo, p))
return 0;
- /* Wait for all children (clone and not) if __WALL is set;
- * otherwise, wait for clone children *only* if __WCLONE is
- * set; otherwise, wait for non-clone children *only*. (Note:
- * A "clone" child here is one that reports to its parent
- * using a signal other than SIGCHLD.) */
- if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
- && !(wo->wo_flags & __WALL))
+
+ /*
+ * Wait for all children (clone and not) if __WALL is set or
+ * if it is traced by us.
+ */
+ if (ptrace || (wo->wo_flags & __WALL))
+ return 1;
+
+ /*
+ * Otherwise, wait for clone children *only* if __WCLONE is set;
+ * otherwise, wait for non-clone children *only*.
+ *
+ * Note: a "clone" child here is one that reports to its parent
+ * using a signal other than SIGCHLD, or a non-leader thread which
+ * we can only see if it is traced by us.
+ */
+ if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
return 0;
return 1;
@@ -1313,7 +1324,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
if (unlikely(exit_state == EXIT_DEAD))
return 0;
- ret = eligible_child(wo, p);
+ ret = eligible_child(wo, ptrace, p);
if (!ret)
return ret;
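
eligible_child() now learns whether the child is one of our ptrace tracees and accepts it unconditionally in that case; a traced non-leader thread reports with a signal other than SIGCHLD (or none at all), so without this a debugger could never wait for it unless it passed __WALL. A standalone model of the resulting predicate, with flag values as defined for waitpid():

    #include <signal.h>
    #include <stdio.h>

    #define __WCLONE 0x80000000
    #define __WALL   0x40000000

    /* Simplified model of the predicate, not the kernel function. */
    static int eligible(unsigned flags, int traced_by_us, int exit_signal)
    {
        if (traced_by_us || (flags & __WALL))
            return 1;                   /* ptracer / __WALL: take everything */

        /* __WCLONE selects children that do NOT report via SIGCHLD */
        return !((exit_signal != SIGCHLD) ^ !!(flags & __WCLONE));
    }

    int main(void)
    {
        printf("%d\n", eligible(0, 0, SIGCHLD));  /* 1: normal child             */
        printf("%d\n", eligible(0, 0, 0));        /* 0: clone child, no __WCLONE */
        printf("%d\n", eligible(0, 1, 0));        /* 1: but we are its ptracer   */
        return 0;
    }
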
diff --git a/kernel/sched/proc.c b/kernel/sched/proc.c
index 8ecd552fe4f2..09dfe51f89b5 100644
--- a/kernel/sched/proc.c
+++ b/kernel/sched/proc.c
@@ -97,10 +97,13 @@ long calc_load_fold_active(struct rq *this_rq)
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
- load *= exp;
- load += active * (FIXED_1 - exp);
- load += 1UL << (FSHIFT - 1);
- return load >> FSHIFT;
+ unsigned long newload;
+
+ newload = load * exp + active * (FIXED_1 - exp);
+ if (active >= load)
+ newload += FIXED_1-1;
+
+ return newload / FIXED_1;
}
#ifdef CONFIG_NO_HZ_COMMON
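
calc_load() keeps the 1/5/15-minute averages in 11-bit fixed point. The old unconditional round-to-nearest meant a residual load of 1/2048 never decayed to zero on an idle system, and a fully loaded one never quite reached the integer target; the new rule rounds up only while the average is climbing (active >= load) and truncates on the way down. A standalone model using the scheduler's FSHIFT/FIXED_1/EXP_1 values:

    #include <stdio.h>

    #define FSHIFT  11
    #define FIXED_1 (1UL << FSHIFT)
    #define EXP_1   1884UL              /* 1-minute decay factor */

    static unsigned long calc_load(unsigned long load, unsigned long exp,
                                   unsigned long active)
    {
        unsigned long newload = load * exp + active * (FIXED_1 - exp);

        if (active >= load)
            newload += FIXED_1 - 1;     /* round up while climbing */

        return newload / FIXED_1;       /* otherwise round down    */
    }

    int main(void)
    {
        unsigned long load = 1;         /* smallest nonzero avenrun value */
        int ticks = 0;

        while (load && ticks < 100) {   /* idle system: active == 0 */
            load = calc_load(load, EXP_1, 0);
            ticks++;
        }
        printf("decayed to zero after %d idle sample(s)\n", ticks);
        return 0;
    }
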
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0fc5cfedcc8c..d4588b08e07a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -466,7 +466,8 @@ struct ring_buffer_per_cpu {
raw_spinlock_t reader_lock; /* serialize readers */
arch_spinlock_t lock;
struct lock_class_key lock_key;
- unsigned int nr_pages;
+ unsigned long nr_pages;
+ unsigned int current_context;
struct list_head *pages;
struct buffer_page *head_page; /* read from head */
struct buffer_page *tail_page; /* write to tail */
@@ -486,7 +487,7 @@ struct ring_buffer_per_cpu {
u64 write_stamp;
u64 read_stamp;
/* ring buffer pages to update, > 0 to add, < 0 to remove */
- int nr_pages_to_update;
+ long nr_pages_to_update;
struct list_head new_pages; /* new pages to add */
struct work_struct update_pages_work;
struct completion update_done;
@@ -1165,10 +1166,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
return 0;
}
-static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
+static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
{
- int i;
struct buffer_page *bpage, *tmp;
+ long i;
for (i = 0; i < nr_pages; i++) {
struct page *page;
@@ -1205,7 +1206,7 @@ free_pages:
}
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
- unsigned nr_pages)
+ unsigned long nr_pages)
{
LIST_HEAD(pages);
@@ -1230,7 +1231,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
}
static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct buffer_page *bpage;
@@ -1330,8 +1331,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
struct lock_class_key *key)
{
struct ring_buffer *buffer;
+ long nr_pages;
int bsize;
- int cpu, nr_pages;
+ int cpu;
/* keep it in its own cache line */
buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1457,12 +1459,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
}
static int
-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
+rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
{
struct list_head *tail_page, *to_remove, *next_page;
struct buffer_page *to_remove_page, *tmp_iter_page;
struct buffer_page *last_page, *first_page;
- unsigned int nr_removed;
+ unsigned long nr_removed;
unsigned long head_bit;
int page_entries;
@@ -1679,7 +1681,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
int cpu_id)
{
struct ring_buffer_per_cpu *cpu_buffer;
- unsigned nr_pages;
+ unsigned long nr_pages;
int cpu, err = 0;
/*
@@ -1693,14 +1695,13 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
!cpumask_test_cpu(cpu_id, buffer->cpumask))
return size;
- size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
- size *= BUF_PAGE_SIZE;
+ nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
/* we need a minimum of two pages */
- if (size < BUF_PAGE_SIZE * 2)
- size = BUF_PAGE_SIZE * 2;
+ if (nr_pages < 2)
+ nr_pages = 2;
- nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+ size = nr_pages * BUF_PAGE_SIZE;
/*
* Don't succeed if resizing is disabled, as a reader might be
@@ -2680,11 +2681,11 @@ rb_reserve_next_event(struct ring_buffer *buffer,
* just so happens that it is the same bit corresponding to
* the current context.
*/
-static DEFINE_PER_CPU(unsigned int, current_context);
-static __always_inline int trace_recursive_lock(void)
+static __always_inline int
+trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
{
- unsigned int val = __this_cpu_read(current_context);
+ unsigned int val = cpu_buffer->current_context;
int bit;
if (in_interrupt()) {
@@ -2701,23 +2702,21 @@ static __always_inline int trace_recursive_lock(void)
return 1;
val |= (1 << bit);
- __this_cpu_write(current_context, val);
+ cpu_buffer->current_context = val;
return 0;
}
-static __always_inline void trace_recursive_unlock(void)
+static __always_inline void
+trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
{
- unsigned int val = __this_cpu_read(current_context);
-
- val &= val & (val - 1);
- __this_cpu_write(current_context, val);
+ cpu_buffer->current_context &= cpu_buffer->current_context - 1;
}
#else
-#define trace_recursive_lock() (0)
-#define trace_recursive_unlock() do { } while (0)
+#define trace_recursive_lock(cpu_buffer) (0)
+#define trace_recursive_unlock(cpu_buffer) do { } while (0)
#endif
@@ -2749,35 +2748,34 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
/* If we are tracing schedule, we don't want to recurse */
preempt_disable_notrace();
- if (atomic_read(&buffer->record_disabled))
- goto out_nocheck;
-
- if (trace_recursive_lock())
- goto out_nocheck;
+ if (unlikely(atomic_read(&buffer->record_disabled)))
+ goto out;
cpu = raw_smp_processor_id();
- if (!cpumask_test_cpu(cpu, buffer->cpumask))
+ if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask)))
goto out;
cpu_buffer = buffer->buffers[cpu];
- if (atomic_read(&cpu_buffer->record_disabled))
+ if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
goto out;
- if (length > BUF_MAX_DATA_SIZE)
+ if (unlikely(length > BUF_MAX_DATA_SIZE))
+ goto out;
+
+ if (unlikely(trace_recursive_lock(cpu_buffer)))
goto out;
event = rb_reserve_next_event(buffer, cpu_buffer, length);
if (!event)
- goto out;
+ goto out_unlock;
return event;
+ out_unlock:
+ trace_recursive_unlock(cpu_buffer);
out:
- trace_recursive_unlock();
-
- out_nocheck:
preempt_enable_notrace();
return NULL;
}
@@ -2867,7 +2865,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
rb_wakeups(buffer, cpu_buffer);
- trace_recursive_unlock();
+ trace_recursive_unlock(cpu_buffer);
preempt_enable_notrace();
@@ -2978,7 +2976,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
out:
rb_end_commit(cpu_buffer);
- trace_recursive_unlock();
+ trace_recursive_unlock(cpu_buffer);
preempt_enable_notrace();
@@ -4655,8 +4653,9 @@ static int rb_cpu_notify(struct notifier_block *self,
struct ring_buffer *buffer =
container_of(self, struct ring_buffer, cpu_notify);
long cpu = (long)hcpu;
- int cpu_i, nr_pages_same;
- unsigned int nr_pages;
+ long nr_pages_same;
+ int cpu_i;
+ unsigned long nr_pages;
switch (action) {
case CPU_UP_PREPARE:
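
Besides widening the page counts to long so very large buffer sizes cannot overflow an int, the ring-buffer hunks move the recursion-detection bitmask from a per-CPU variable into the per-CPU buffer and only take it after the cheaper record_disabled and length checks. The unlock path relies on x &= x - 1 clearing the lowest set bit, which is the innermost nesting context since deeper contexts use lower bit numbers. A standalone demo of that identity (the particular bit numbers are illustrative):

    #include <stdio.h>

    int main(void)
    {
        unsigned int ctx = 0;

        ctx |= 1u << 3;             /* normal context takes a high bit      */
        ctx |= 1u << 1;             /* an IRQ nests and takes a lower bit   */
        printf("locked:  0x%x\n", ctx);     /* 0xa */

        ctx &= ctx - 1;             /* IRQ unlocks: lowest set bit cleared  */
        printf("irq out: 0x%x\n", ctx);     /* 0x8 */

        ctx &= ctx - 1;             /* normal context unlocks               */
        printf("all out: 0x%x\n", ctx);     /* 0x0 */
        return 0;
    }
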
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 1ff0fd098504..c83ac43418be 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -646,9 +646,9 @@ static struct dma_debug_entry *dma_entry_alloc(void)
spin_lock_irqsave(&free_entries_lock, flags);
if (list_empty(&free_entries)) {
- pr_err("DMA-API: debugging out of memory - disabling\n");
global_disable = true;
spin_unlock_irqrestore(&free_entries_lock, flags);
+ pr_err("DMA-API: debugging out of memory - disabling\n");
return NULL;
}
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index c548ab213f76..d08f75972c57 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -855,8 +855,8 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
goto out;
if (svc_getnl(&buf->head[0]) != seq)
goto out;
- /* trim off the mic at the end before returning */
- xdr_buf_trim(buf, mic.len + 4);
+ /* trim off the mic and padding at the end before returning */
+ xdr_buf_trim(buf, round_up_to_quad(mic.len) + 4);
stat = 0;
out:
kfree(mic.data);
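
XDR pads opaque data to a four-byte boundary, so trimming only mic.len + 4 bytes could leave up to three pad bytes on the end of the buffer; the fix trims the quad-aligned length plus the 4-byte length word. A standalone sketch, with round_up_to_quad() re-implemented the way the sunrpc helper defines it:

    #include <stdio.h>

    static unsigned int round_up_to_quad(unsigned int len)
    {
        return (len + 3) & ~3u;
    }

    int main(void)
    {
        unsigned int mic_len = 37;  /* example GSS MIC length */

        /* 4-byte XDR length word + MIC payload rounded up to a quad */
        printf("bytes to trim: %u\n", 4 + round_up_to_quad(mic_len));   /* 44 */
        return 0;
    }
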
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index f734033af219..5c17a5c1f306 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -24,6 +24,7 @@ warning-1 += $(call cc-option, -Wmissing-prototypes)
warning-1 += -Wold-style-definition
warning-1 += $(call cc-option, -Wmissing-include-dirs)
warning-1 += $(call cc-option, -Wunused-but-set-variable)
+warning-1 += $(call cc-option, -Wunused-const-variable)
warning-1 += $(call cc-disable-warning, missing-field-initializers)
warning-2 := -Waggregate-return
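
The extra W=1 flag asks the compiler to report file-scope const objects that nothing references; the cc-option wrapper keeps compilers without -Wunused-const-variable (it appeared in gcc 6) from choking on it. A minimal example of what it flags, with invented names; compile with gcc -Wunused-const-variable -c:

    static const int default_timeout_ms = 500;  /* warned: defined but not used */

    int example_unused_const(void)
    {
        return 0;                               /* never reads default_timeout_ms */
    }
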
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 041712592e29..f449d8710b00 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -64,12 +64,15 @@
#define FIL1_0 0x1c
#define FIL1_1 0x1d
#define FIL1_2 0x1e
-#define FIL1_3 0x1f
+#define FIL1_3 0x1f /* The maximum valid register for ak4642 */
#define PW_MGMT4 0x20
#define MD_CTL5 0x21
#define LO_MS 0x22
#define HP_MS 0x23
-#define SPK_MS 0x24
+#define SPK_MS 0x24 /* The maximum valid register for ak4643 */
+#define EQ_FBEQAB 0x25
+#define EQ_FBEQCD 0x26
+#define EQ_FBEQE 0x27 /* The maximum valid register for ak4648 */
/* PW_MGMT1*/
#define PMVCM (1 << 6) /* VCOM Power Management */
@@ -210,7 +213,7 @@ static const struct snd_soc_dapm_route ak4642_intercon[] = {
/*
* ak4642 register cache
*/
-static const struct reg_default ak4642_reg[] = {
+static const struct reg_default ak4643_reg[] = {
{ 0, 0x00 }, { 1, 0x00 }, { 2, 0x01 }, { 3, 0x00 },
{ 4, 0x02 }, { 5, 0x00 }, { 6, 0x00 }, { 7, 0x00 },
{ 8, 0xe1 }, { 9, 0xe1 }, { 10, 0x18 }, { 11, 0x00 },
@@ -223,6 +226,14 @@ static const struct reg_default ak4642_reg[] = {
{ 36, 0x00 },
};
+/* The default settings for 0x0 ~ 0x1f registers are the same for ak4642
+ and ak4643. So we reuse the ak4643 reg_default for ak4642.
+ The valid registers for ak4642 are 0x0 ~ 0x1f which is a subset of ak4643,
+ so define NUM_AK4642_REG_DEFAULTS for ak4642.
+*/
+#define ak4642_reg ak4643_reg
+#define NUM_AK4642_REG_DEFAULTS (FIL1_3 + 1)
+
static const struct reg_default ak4648_reg[] = {
{ 0, 0x00 }, { 1, 0x00 }, { 2, 0x01 }, { 3, 0x00 },
{ 4, 0x02 }, { 5, 0x00 }, { 6, 0x00 }, { 7, 0x00 },
@@ -521,17 +532,28 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4642 = {
static const struct regmap_config ak4642_regmap = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = ARRAY_SIZE(ak4642_reg) + 1,
+ .max_register = FIL1_3,
.reg_defaults = ak4642_reg,
- .num_reg_defaults = ARRAY_SIZE(ak4642_reg),
+ .num_reg_defaults = NUM_AK4642_REG_DEFAULTS,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct regmap_config ak4643_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = SPK_MS,
+ .reg_defaults = ak4643_reg,
+ .num_reg_defaults = ARRAY_SIZE(ak4643_reg),
+ .cache_type = REGCACHE_RBTREE,
};
static const struct regmap_config ak4648_regmap = {
.reg_bits = 8,
.val_bits = 8,
- .max_register = ARRAY_SIZE(ak4648_reg) + 1,
+ .max_register = EQ_FBEQE,
.reg_defaults = ak4648_reg,
.num_reg_defaults = ARRAY_SIZE(ak4648_reg),
+ .cache_type = REGCACHE_RBTREE,
};
static const struct ak4642_drvdata ak4642_drvdata = {
@@ -539,7 +561,7 @@ static const struct ak4642_drvdata ak4642_drvdata = {
};
static const struct ak4642_drvdata ak4643_drvdata = {
- .regmap_config = &ak4642_regmap,
+ .regmap_config = &ak4643_regmap,
};
static const struct ak4642_drvdata ak4648_drvdata = {