-rw-r--r--  Documentation/DocBook/drm.tmpl | 2
-rw-r--r--  Documentation/filesystems/Locking | 6
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  drivers/char/hw_random/virtio-rng.c | 1
-rw-r--r--  drivers/edac/Kconfig | 4
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 4
-rw-r--r--  drivers/edac/sb_edac.c | 810
-rw-r--r--  drivers/gpu/drm/nouveau/core/core/client.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nve0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/core/include/core/client.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/bar/base.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/class.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/notify.c | 29
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/object.c | 4
-rw-r--r--  drivers/hid/wacom_sys.c | 2
-rw-r--r--  drivers/input/input-mt.c | 38
-rw-r--r--  drivers/input/joystick/analog.c | 2
-rw-r--r--  drivers/input/joystick/xpad.c | 174
-rw-r--r--  drivers/input/keyboard/cap1106.c | 8
-rw-r--r--  drivers/input/mouse/synaptics.c | 72
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c | 366
-rw-r--r--  drivers/input/touchscreen/edt-ft5x06.c | 1
-rw-r--r--  drivers/scsi/libiscsi.c | 2
-rw-r--r--  drivers/scsi/pm8001/pm8001_ctl.c | 144
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 4
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 39
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 14
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 16
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 6
-rw-r--r--  drivers/scsi/u14-34f.c | 4
-rw-r--r--  fs/aio.c | 86
-rw-r--r--  fs/btrfs/backref.c | 14
-rw-r--r--  fs/btrfs/btrfs_inode.h | 6
-rw-r--r--  fs/btrfs/ctree.c | 20
-rw-r--r--  fs/btrfs/ctree.h | 4
-rw-r--r--  fs/btrfs/disk-io.c | 32
-rw-r--r--  fs/btrfs/extent-tree.c | 285
-rw-r--r--  fs/btrfs/file-item.c | 2
-rw-r--r--  fs/btrfs/file.c | 26
-rw-r--r--  fs/btrfs/inode.c | 59
-rw-r--r--  fs/btrfs/ordered-data.c | 123
-rw-r--r--  fs/btrfs/ordered-data.h | 5
-rw-r--r--  fs/btrfs/qgroup.c | 169
-rw-r--r--  fs/btrfs/qgroup.h | 1
-rw-r--r--  fs/btrfs/super.c | 51
-rw-r--r--  fs/btrfs/transaction.c | 33
-rw-r--r--  fs/btrfs/transaction.h | 1
-rw-r--r--  fs/btrfs/ulist.h | 15
-rw-r--r--  fs/locks.c | 86
-rw-r--r--  include/linux/edac.h | 5
-rw-r--r--  include/linux/input/mt.h | 1
-rw-r--r--  include/scsi/iscsi_if.h | 1
-rw-r--r--  sound/oss/uart401.c | 40
-rw-r--r--  sound/oss/waveartist.c | 157
-rw-r--r--  sound/pci/hda/hda_intel.c | 7
-rw-r--r--  sound/pci/hda/patch_ca0132.c | 7
-rw-r--r--  sound/pci/hda/patch_cmedia.c | 47
-rw-r--r--  sound/pci/hda/patch_conexant.c | 6
-rw-r--r--  sound/pci/hda/patch_realtek.c | 35
-rw-r--r--  sound/usb/quirks-table.h | 29
69 files changed, 2069 insertions, 1096 deletions
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 1d3756d3176c..bacefc5b222e 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -315,7 +315,7 @@ char *date;</synopsis>
<function>drm_dev_unregister()</function> followed by a call to
<function>drm_dev_unref()</function>.
</para>
-!Edrivers/gpu/drm/drm_stub.c
+!Edrivers/gpu/drm/drm_drv.c
</sect2>
<sect2>
<title>Driver Load</title>
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index b18dd1779029..f1997e9da61f 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -349,7 +349,11 @@ prototypes:
locking rules:
inode->i_lock may block
fl_copy_lock: yes no
-fl_release_private: maybe no
+fl_release_private: maybe maybe[1]
+
+[1]: ->fl_release_private for flock or POSIX locks is currently allowed
+to block. Leases however can still be freed while the i_lock is held and
+so fl_release_private called on a lease should not block.
----------------------- lock_manager_operations ---------------------------
prototypes:
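The footnote added above separates flock/POSIX locks, whose ->fl_release_private may block, from leases, whose ->fl_release_private can be invoked with inode->i_lock held and therefore must not sleep. Below is a minimal sketch of a lease-safe callback; the private structure and its placement in fl_owner are illustrative assumptions, not taken from any in-tree filesystem.

/*
 * Illustrative only (assumes <linux/fs.h> and <linux/rcupdate.h>):
 * a ->fl_release_private that never sleeps, so it stays safe even
 * when called for a lease under inode->i_lock.
 */
struct example_lock_private {
	struct rcu_head rcu;
	/* filesystem-specific state ... */
};

static void example_fl_release_private(struct file_lock *fl)
{
	struct example_lock_private *priv = fl->fl_owner;

	fl->fl_owner = NULL;
	kfree_rcu(priv, rcu);	/* deferred free; does not block */
}
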
diff --git a/MAINTAINERS b/MAINTAINERS
index 2f85f55c8fb8..3bf3fc26c891 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7059,6 +7059,7 @@ F: drivers/scsi/pmcraid.*
PMC SIERRA PM8001 DRIVER
M: xjtuwjp@gmail.com
M: lindar_liu@usish.com
+L: pmchba@pmcs.com
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/pm8001/
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 0027137daa56..2e3139eda93b 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -116,6 +116,7 @@ static int probe_common(struct virtio_device *vdev)
.cleanup = virtio_cleanup,
.priv = (unsigned long)vi,
.name = vi->name,
+ .quality = 1000,
};
vdev->priv = vi;
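The one-liner above advertises virtio-rng's entropy quality to the hwrng core (roughly 1000 entropy bits per 1024 bits read), so the core knows how much entropy to credit for data from this device. A hedged sketch of how a driver-supplied struct hwrng carries that field; the callback and names are made up, only the .quality semantics mirror the patch.

#include <linux/hw_random.h>

/* illustrative read callback: fill buf with up to max bytes */
static int example_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	/* ... gather hardware randomness here ... */
	return 0;		/* bytes actually produced */
}

static struct hwrng example_rng = {
	.name    = "example-rng",
	.read    = example_rng_read,
	.quality = 1000,	/* ~1000 entropy bits per 1024 bits of data */
};

/* hwrng_register(&example_rng) would then register it with the core */
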
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index f8665f9c3e03..fd89ca982748 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -253,12 +253,12 @@ config EDAC_I7300
Clarksboro MCH (Intel 7300 chipset).
config EDAC_SBRIDGE
- tristate "Intel Sandy-Bridge Integrated MC"
+ tristate "Intel Sandy-Bridge/Ivy-Bridge/Haswell Integrated MC"
depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
depends on PCI_MMCONFIG
help
Support for error detection and correction the Intel
- Sandy Bridge Integrated Memory Controller.
+ Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers.
config EDAC_MPC85XX
tristate "Freescale MPC83xx / MPC85xx"
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 01fae8289cf0..a6cd36100663 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -108,7 +108,9 @@ static const char * const mem_types[] = {
[MEM_RDDR2] = "Registered-DDR2",
[MEM_XDR] = "XDR",
[MEM_DDR3] = "Unbuffered-DDR3",
- [MEM_RDDR3] = "Registered-DDR3"
+ [MEM_RDDR3] = "Registered-DDR3",
+ [MEM_DDR4] = "Unbuffered-DDR4",
+ [MEM_RDDR4] = "Registered-DDR4"
};
static const char * const dev_types[] = {
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index deea0dc9999b..0034c4844428 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -99,6 +99,7 @@ static const u32 ibridge_dram_rule[] = {
#define DRAM_ATTR(reg) GET_BITFIELD(reg, 2, 3)
#define INTERLEAVE_MODE(reg) GET_BITFIELD(reg, 1, 1)
#define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
+#define A7MODE(reg) GET_BITFIELD(reg, 26, 26)
static char *get_dram_attr(u32 reg)
{
@@ -164,6 +165,8 @@ static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
#define TOLM 0x80
#define TOHM 0x84
+#define HASWELL_TOHM_0 0xd4
+#define HASWELL_TOHM_1 0xd8
#define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
#define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
@@ -176,8 +179,6 @@ static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
#define SAD_CONTROL 0xf4
-#define NODE_ID(reg) GET_BITFIELD(reg, 0, 2)
-
/* Device 14 function 0 */
static const u32 tad_dram_rule[] = {
@@ -235,7 +236,6 @@ static const u32 rir_way_limit[] = {
#define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31)
#define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29)
-#define RIR_LIMIT(reg) ((GET_BITFIELD(reg, 1, 10) << 29)| 0x1fffffff)
#define MAX_RIR_WAY 8
@@ -279,8 +279,6 @@ static const u32 correrrthrsld[] = {
#define IB_RANK_CFG_A 0x0320
-#define IS_RDIMM_ENABLED(reg) GET_BITFIELD(reg, 11, 11)
-
/*
* sbridge structs
*/
@@ -291,6 +289,7 @@ static const u32 correrrthrsld[] = {
enum type {
SANDY_BRIDGE,
IVY_BRIDGE,
+ HASWELL,
};
struct sbridge_pvt;
@@ -300,11 +299,15 @@ struct sbridge_info {
u32 rankcfgr;
u64 (*get_tolm)(struct sbridge_pvt *pvt);
u64 (*get_tohm)(struct sbridge_pvt *pvt);
+ u64 (*rir_limit)(u32 reg);
const u32 *dram_rule;
const u32 *interleave_list;
const struct interleave_pkg *interleave_pkg;
u8 max_sad;
u8 max_interleave;
+ u8 (*get_node_id)(struct sbridge_pvt *pvt);
+ enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
+ struct pci_dev *pci_vtd;
};
struct sbridge_channel {
@@ -313,9 +316,7 @@ struct sbridge_channel {
};
struct pci_id_descr {
- int dev;
- int func;
- int dev_id;
+ int dev_id;
int optional;
};
@@ -338,6 +339,7 @@ struct sbridge_pvt {
struct pci_dev *pci_sad0, *pci_sad1;
struct pci_dev *pci_ha0, *pci_ha1;
struct pci_dev *pci_br0, *pci_br1;
+ struct pci_dev *pci_ha1_ta;
struct pci_dev *pci_tad[NUM_CHANNELS];
struct sbridge_dev *sbridge_dev;
@@ -362,31 +364,29 @@ struct sbridge_pvt {
u64 tolm, tohm;
};
-#define PCI_DESCR(device, function, device_id, opt) \
- .dev = (device), \
- .func = (function), \
- .dev_id = (device_id), \
+#define PCI_DESCR(device_id, opt) \
+ .dev_id = (device_id), \
.optional = opt
static const struct pci_id_descr pci_dev_descr_sbridge[] = {
/* Processor Home Agent */
- { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0) },
/* Memory controller */
- { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) },
- { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) },
- { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) },
- { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) },
- { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) },
- { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) },
- { PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1) },
/* System Address Decoder */
- { PCI_DESCR(12, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) },
- { PCI_DESCR(12, 7, PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0) },
/* Broadcast Registers */
- { PCI_DESCR(13, 6, PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0) },
};
#define PCI_ID_TABLE_ENTRY(A) { .descr=A, .n_devs = ARRAY_SIZE(A) }
@@ -423,34 +423,34 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
static const struct pci_id_descr pci_dev_descr_ibridge[] = {
/* Processor Home Agent */
- { PCI_DESCR(14, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0) },
/* Memory controller */
- { PCI_DESCR(15, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) },
- { PCI_DESCR(15, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) },
- { PCI_DESCR(15, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) },
- { PCI_DESCR(15, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) },
- { PCI_DESCR(15, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) },
- { PCI_DESCR(15, 5, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0) },
/* System Address Decoder */
- { PCI_DESCR(22, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0) },
/* Broadcast Registers */
- { PCI_DESCR(22, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) },
- { PCI_DESCR(22, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0) },
/* Optional, mode 2HA */
- { PCI_DESCR(28, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1) },
#if 0
- { PCI_DESCR(29, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) },
- { PCI_DESCR(29, 1, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1) },
#endif
- { PCI_DESCR(29, 2, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) },
- { PCI_DESCR(29, 3, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1) },
- { PCI_DESCR(17, 0, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) },
- { PCI_DESCR(17, 4, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1) },
};
static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
@@ -458,12 +458,80 @@ static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
{0,} /* 0 terminated list. */
};
+/* Haswell support */
+/* EN processor:
+ * - 1 IMC
+ * - 3 DDR3 channels, 2 DPC per channel
+ * EP processor:
+ * - 1 or 2 IMC
+ * - 4 DDR4 channels, 3 DPC per channel
+ * EP 4S processor:
+ * - 2 IMC
+ * - 4 DDR4 channels, 3 DPC per channel
+ * EX processor:
+ * - 2 IMC
+ * - each IMC interfaces with a SMI 2 channel
+ * - each SMI channel interfaces with a scalable memory buffer
+ * - each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
+ */
+#define HASWELL_DDRCRCLKCONTROLS 0xa10
+#define HASWELL_HASYSDEFEATURE2 0x84
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL 0x2f71
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL 0x2f79
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
+#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
+static const struct pci_id_descr pci_dev_descr_haswell[] = {
+ /* first item must be the HA */
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1) },
+
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_THERMAL, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1) },
+ { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1) },
+};
+
+static const struct pci_id_table pci_dev_descr_haswell_table[] = {
+ PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell),
+ {0,} /* 0 terminated list. */
+};
+
/*
* pci_device_id table for which devices we are looking for
*/
static const struct pci_device_id sbridge_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0)},
{0,} /* 0 terminated list. */
};
@@ -472,13 +540,17 @@ static const struct pci_device_id sbridge_pci_tbl[] = {
Ancillary status routines
****************************************************************************/
-static inline int numrank(u32 mtr)
+static inline int numrank(enum type type, u32 mtr)
{
int ranks = (1 << RANK_CNT_BITS(mtr));
+ int max = 4;
- if (ranks > 4) {
- edac_dbg(0, "Invalid number of ranks: %d (max = 4) raw value = %x (%04x)\n",
- ranks, (unsigned int)RANK_CNT_BITS(mtr), mtr);
+ if (type == HASWELL)
+ max = 8;
+
+ if (ranks > max) {
+ edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
+ ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
return -EINVAL;
}
@@ -588,10 +660,107 @@ static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
return GET_TOHM(reg);
}
+static u64 rir_limit(u32 reg)
+{
+ return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff;
+}
+
+static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
+{
+ u32 reg;
+ enum mem_type mtype;
+
+ if (pvt->pci_ddrio) {
+ pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
+ &reg);
+ if (GET_BITFIELD(reg, 11, 11))
+ /* FIXME: Can also be LRDIMM */
+ mtype = MEM_RDDR3;
+ else
+ mtype = MEM_DDR3;
+ } else
+ mtype = MEM_UNKNOWN;
+
+ return mtype;
+}
+
+static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
+{
+ u32 reg;
+ bool registered = false;
+ enum mem_type mtype = MEM_UNKNOWN;
+
+ if (!pvt->pci_ddrio)
+ goto out;
+
+ pci_read_config_dword(pvt->pci_ddrio,
+ HASWELL_DDRCRCLKCONTROLS, &reg);
+ /* Is_Rdimm */
+ if (GET_BITFIELD(reg, 16, 16))
+ registered = true;
+
+ pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
+ if (GET_BITFIELD(reg, 14, 14)) {
+ if (registered)
+ mtype = MEM_RDDR4;
+ else
+ mtype = MEM_DDR4;
+ } else {
+ if (registered)
+ mtype = MEM_RDDR3;
+ else
+ mtype = MEM_DDR3;
+ }
+
+out:
+ return mtype;
+}
+
+static u8 get_node_id(struct sbridge_pvt *pvt)
+{
+ u32 reg;
+ pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
+ return GET_BITFIELD(reg, 0, 2);
+}
+
+static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
+{
+ u32 reg;
+
+ pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
+ return GET_BITFIELD(reg, 0, 3);
+}
+
+static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
+{
+ u32 reg;
+
+ pci_read_config_dword(pvt->info.pci_vtd, TOLM, &reg);
+ return (GET_BITFIELD(reg, 26, 31) << 26) | 0x1ffffff;
+}
+
+static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
+{
+ u64 rc;
+ u32 reg;
+
+ pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
+ rc = GET_BITFIELD(reg, 26, 31);
+ pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
+ rc = ((reg << 6) | rc) << 26;
+
+ return rc | 0x1ffffff;
+}
+
+static u64 haswell_rir_limit(u32 reg)
+{
+ return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1;
+}
+
static inline u8 sad_pkg_socket(u8 pkg)
{
/* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
- return (pkg >> 3) | (pkg & 0x3);
+ return ((pkg >> 3) << 2) | (pkg & 0x3);
}
static inline u8 sad_pkg_ha(u8 pkg)
@@ -602,44 +771,43 @@ static inline u8 sad_pkg_ha(u8 pkg)
/****************************************************************************
Memory check routines
****************************************************************************/
-static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
- unsigned func)
+static struct pci_dev *get_pdev_same_bus(u8 bus, u32 id)
{
- struct sbridge_dev *sbridge_dev = get_sbridge_dev(bus);
- int i;
-
- if (!sbridge_dev)
- return NULL;
-
- for (i = 0; i < sbridge_dev->n_devs; i++) {
- if (!sbridge_dev->pdev[i])
- continue;
+ struct pci_dev *pdev = NULL;
- if (PCI_SLOT(sbridge_dev->pdev[i]->devfn) == slot &&
- PCI_FUNC(sbridge_dev->pdev[i]->devfn) == func) {
- edac_dbg(1, "Associated %02x.%02x.%d with %p\n",
- bus, slot, func, sbridge_dev->pdev[i]);
- return sbridge_dev->pdev[i];
- }
- }
+ do {
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, id, pdev);
+ if (pdev && pdev->bus->number == bus)
+ break;
+ } while (pdev);
- return NULL;
+ return pdev;
}
/**
* check_if_ecc_is_active() - Checks if ECC is active
- * bus: Device bus
+ * @bus: Device bus
+ * @type: Memory controller type
+ * returns: 0 in case ECC is active, -ENODEV if it can't be determined or
+ * disabled
*/
-static int check_if_ecc_is_active(const u8 bus)
+static int check_if_ecc_is_active(const u8 bus, enum type type)
{
struct pci_dev *pdev = NULL;
- u32 mcmtr;
+ u32 mcmtr, id;
+
+ if (type == IVY_BRIDGE)
+ id = PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA;
+ else if (type == HASWELL)
+ id = PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA;
+ else
+ id = PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA;
- pdev = get_pdev_slot_func(bus, 15, 0);
+ pdev = get_pdev_same_bus(bus, id);
if (!pdev) {
sbridge_printk(KERN_ERR, "Couldn't find PCI device "
- "%2x.%02d.%d!!!\n",
- bus, 15, 0);
+ "%04x:%04x! on bus %02d\n",
+ PCI_VENDOR_ID_INTEL, id, bus);
return -ENODEV;
}
@@ -661,11 +829,14 @@ static int get_dimm_config(struct mem_ctl_info *mci)
enum edac_type mode;
enum mem_type mtype;
- pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
+ if (pvt->info.type == HASWELL)
+ pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
+ else
+ pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
+
pvt->sbridge_dev->source_id = SOURCE_ID(reg);
- pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
- pvt->sbridge_dev->node_id = NODE_ID(reg);
+ pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
pvt->sbridge_dev->mc,
pvt->sbridge_dev->node_id,
@@ -698,24 +869,18 @@ static int get_dimm_config(struct mem_ctl_info *mci)
pvt->is_close_pg = false;
}
- if (pvt->pci_ddrio) {
- pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
- &reg);
- if (IS_RDIMM_ENABLED(reg)) {
- /* FIXME: Can also be LRDIMM */
- edac_dbg(0, "Memory is registered\n");
- mtype = MEM_RDDR3;
- } else {
- edac_dbg(0, "Memory is unregistered\n");
- mtype = MEM_DDR3;
- }
- } else {
+ mtype = pvt->info.get_memory_type(pvt);
+ if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
+ edac_dbg(0, "Memory is registered\n");
+ else if (mtype == MEM_UNKNOWN)
edac_dbg(0, "Cannot determine memory type\n");
- mtype = MEM_UNKNOWN;
- }
+ else
+ edac_dbg(0, "Memory is unregistered\n");
- /* On all supported DDR3 DIMM types, there are 8 banks available */
- banks = 8;
+ if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
+ banks = 16;
+ else
+ banks = 8;
for (i = 0; i < NUM_CHANNELS; i++) {
u32 mtr;
@@ -729,11 +894,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
if (IS_DIMM_PRESENT(mtr)) {
pvt->channel[i].dimms++;
- ranks = numrank(mtr);
+ ranks = numrank(pvt->info.type, mtr);
rows = numrow(mtr);
cols = numcol(mtr);
- /* DDR3 has 8 I/O banks */
size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
@@ -744,7 +908,17 @@ static int get_dimm_config(struct mem_ctl_info *mci)
dimm->nr_pages = npages;
dimm->grain = 32;
- dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
+ switch (banks) {
+ case 16:
+ dimm->dtype = DEV_X16;
+ break;
+ case 8:
+ dimm->dtype = DEV_X8;
+ break;
+ case 4:
+ dimm->dtype = DEV_X4;
+ break;
+ }
dimm->mtype = mtype;
dimm->edac_mode = mode;
snprintf(dimm->label, sizeof(dimm->label),
@@ -887,7 +1061,7 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
if (!IS_RIR_VALID(reg))
continue;
- tmp_mb = RIR_LIMIT(reg) >> 20;
+ tmp_mb = pvt->info.rir_limit(reg) >> 20;
rir_way = 1 << RIR_WAY(reg);
mb = div_u64_rem(tmp_mb, 1000, &kb);
edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
@@ -936,11 +1110,11 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
struct mem_ctl_info *new_mci;
struct sbridge_pvt *pvt = mci->pvt_info;
struct pci_dev *pci_ha;
- int n_rir, n_sads, n_tads, sad_way, sck_xch;
+ int n_rir, n_sads, n_tads, sad_way, sck_xch;
int sad_interl, idx, base_ch;
- int interleave_mode;
+ int interleave_mode, shiftup = 0;
unsigned sad_interleave[pvt->info.max_interleave];
- u32 reg;
+ u32 reg, dram_rule;
u8 ch_way, sck_way, pkg, sad_ha = 0;
u32 tad_offset;
u32 rir_way;
@@ -987,8 +1161,9 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
sprintf(msg, "Can't discover the memory socket");
return -EINVAL;
}
- *area_type = get_dram_attr(reg);
- interleave_mode = INTERLEAVE_MODE(reg);
+ dram_rule = reg;
+ *area_type = get_dram_attr(dram_rule);
+ interleave_mode = INTERLEAVE_MODE(dram_rule);
pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
&reg);
@@ -1033,6 +1208,36 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
*socket = sad_interleave[idx];
edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
idx, sad_way, *socket);
+ } else if (pvt->info.type == HASWELL) {
+ int bits, a7mode = A7MODE(dram_rule);
+
+ if (a7mode) {
+ /* A7 mode swaps P9 with P6 */
+ bits = GET_BITFIELD(addr, 7, 8) << 1;
+ bits |= GET_BITFIELD(addr, 9, 9);
+ } else
+ bits = GET_BITFIELD(addr, 7, 9);
+
+ if (interleave_mode) {
+ /* interleave mode will XOR {8,7,6} with {18,17,16} */
+ idx = GET_BITFIELD(addr, 16, 18);
+ idx ^= bits;
+ } else
+ idx = bits;
+
+ pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
+ *socket = sad_pkg_socket(pkg);
+ sad_ha = sad_pkg_ha(pkg);
+
+ if (a7mode) {
+ /* MCChanShiftUpEnable */
+ pci_read_config_dword(pvt->pci_ha0,
+ HASWELL_HASYSDEFEATURE2, &reg);
+ shiftup = GET_BITFIELD(reg, 22, 22);
+ }
+
+ edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
+ idx, *socket, sad_ha, shiftup);
} else {
/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
idx = (addr >> 6) & 7;
@@ -1090,7 +1295,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (ch_way == 3)
idx = addr >> 6;
else
- idx = addr >> (6 + sck_way);
+ idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
idx = idx % ch_way;
/*
@@ -1181,7 +1386,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
if (!IS_RIR_VALID(reg))
continue;
- limit = RIR_LIMIT(reg);
+ limit = pvt->info.rir_limit(reg);
mb = div_u64_rem(limit >> 20, 1000, &kb);
edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
n_rir,
@@ -1197,6 +1402,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
return -EINVAL;
}
rir_way = RIR_WAY(reg);
+
if (pvt->is_close_pg)
idx = (ch_addr >> 6);
else
@@ -1259,13 +1465,11 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
{
struct sbridge_dev *sbridge_dev;
const struct pci_id_descr *dev_descr = &table->descr[devno];
-
struct pci_dev *pdev = NULL;
u8 bus = 0;
sbridge_printk(KERN_DEBUG,
- "Seeking for: dev %02x.%d PCI ID %04x:%04x\n",
- dev_descr->dev, dev_descr->func,
+ "Seeking for: PCI ID %04x:%04x\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
@@ -1280,12 +1484,12 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
if (dev_descr->optional)
return 0;
+ /* if the HA wasn't found */
if (devno == 0)
return -ENODEV;
sbridge_printk(KERN_INFO,
- "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
- dev_descr->dev, dev_descr->func,
+ "Device not found: %04x:%04x\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
/* End of list, leave */
@@ -1305,9 +1509,7 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
if (sbridge_dev->pdev[devno]) {
sbridge_printk(KERN_ERR,
- "Duplicated device for "
- "dev %02x:%d.%d PCI ID %04x:%04x\n",
- bus, dev_descr->dev, dev_descr->func,
+ "Duplicated device for %04x:%04x\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
pci_dev_put(pdev);
return -ENODEV;
@@ -1315,30 +1517,15 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
sbridge_dev->pdev[devno] = pdev;
- /* Sanity check */
- if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
- PCI_FUNC(pdev->devfn) != dev_descr->func)) {
- sbridge_printk(KERN_ERR,
- "Device PCI ID %04x:%04x "
- "has dev %02x:%d.%d instead of dev %02x:%02x.%d\n",
- PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
- bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- bus, dev_descr->dev, dev_descr->func);
- return -ENODEV;
- }
-
/* Be sure that the device is enabled */
if (unlikely(pci_enable_device(pdev) < 0)) {
sbridge_printk(KERN_ERR,
- "Couldn't enable "
- "dev %02x:%d.%d PCI ID %04x:%04x\n",
- bus, dev_descr->dev, dev_descr->func,
+ "Couldn't enable %04x:%04x\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
return -ENODEV;
}
- edac_dbg(0, "Detected dev %02x:%d.%d PCI ID %04x:%04x\n",
- bus, dev_descr->dev, dev_descr->func,
+ edac_dbg(0, "Detected %04x:%04x\n",
PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
/*
@@ -1355,10 +1542,9 @@ static int sbridge_get_onedevice(struct pci_dev **prev,
/*
* sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
- * device/functions we want to reference for this driver.
- * Need to 'get' device 16 func 1 and func 2.
+ * devices we want to reference for this driver.
* @num_mc: pointer to the memory controllers count, to be incremented in case
- * of success.
+ * of success.
* @table: model specific table
*
* returns 0 in case of success or error code
@@ -1396,79 +1582,51 @@ static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
{
struct sbridge_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev;
- int i, func, slot;
+ int i;
for (i = 0; i < sbridge_dev->n_devs; i++) {
pdev = sbridge_dev->pdev[i];
if (!pdev)
continue;
- slot = PCI_SLOT(pdev->devfn);
- func = PCI_FUNC(pdev->devfn);
- switch (slot) {
- case 12:
- switch (func) {
- case 6:
- pvt->pci_sad0 = pdev;
- break;
- case 7:
- pvt->pci_sad1 = pdev;
- break;
- default:
- goto error;
- }
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
+ pvt->pci_sad0 = pdev;
break;
- case 13:
- switch (func) {
- case 6:
- pvt->pci_br0 = pdev;
- break;
- default:
- goto error;
- }
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
+ pvt->pci_sad1 = pdev;
break;
- case 14:
- switch (func) {
- case 0:
- pvt->pci_ha0 = pdev;
- break;
- default:
- goto error;
- }
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
+ pvt->pci_br0 = pdev;
break;
- case 15:
- switch (func) {
- case 0:
- pvt->pci_ta = pdev;
- break;
- case 1:
- pvt->pci_ras = pdev;
- break;
- case 2:
- case 3:
- case 4:
- case 5:
- pvt->pci_tad[func - 2] = pdev;
- break;
- default:
- goto error;
- }
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
+ pvt->pci_ha0 = pdev;
break;
- case 17:
- switch (func) {
- case 0:
- pvt->pci_ddrio = pdev;
- break;
- default:
- goto error;
- }
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
+ pvt->pci_ta = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
+ pvt->pci_ras = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
+ {
+ int id = pdev->device - PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0;
+ pvt->pci_tad[id] = pdev;
+ }
+ break;
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
+ pvt->pci_ddrio = pdev;
break;
default:
goto error;
}
- edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
+ edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
+ pdev->vendor, pdev->device,
sbridge_dev->bus,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
pdev);
}
@@ -1488,9 +1646,8 @@ enodev:
return -ENODEV;
error:
- sbridge_printk(KERN_ERR, "Device %d, function %d "
- "is out of the expected range\n",
- slot, func);
+ sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
+ PCI_VENDOR_ID_INTEL, pdev->device);
return -EINVAL;
}
@@ -1499,7 +1656,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
{
struct sbridge_pvt *pvt = mci->pvt_info;
struct pci_dev *pdev, *tmp;
- int i, func, slot;
+ int i;
bool mode_2ha = false;
tmp = pci_get_device(PCI_VENDOR_ID_INTEL,
@@ -1513,79 +1670,60 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
pdev = sbridge_dev->pdev[i];
if (!pdev)
continue;
- slot = PCI_SLOT(pdev->devfn);
- func = PCI_FUNC(pdev->devfn);
- switch (slot) {
- case 14:
- if (func == 0) {
- pvt->pci_ha0 = pdev;
- break;
- }
- goto error;
- case 15:
- switch (func) {
- case 0:
- pvt->pci_ta = pdev;
- break;
- case 1:
- pvt->pci_ras = pdev;
- break;
- case 4:
- case 5:
- /* if we have 2 HAs active, channels 2 and 3
- * are in other device */
- if (mode_2ha)
- break;
- /* fall through */
- case 2:
- case 3:
- pvt->pci_tad[func - 2] = pdev;
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
+ pvt->pci_ha0 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
+ pvt->pci_ta = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
+ pvt->pci_ras = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
+ /* if we have 2 HAs active, channels 2 and 3
+ * are in other device */
+ if (mode_2ha)
break;
- default:
- goto error;
- }
+ /* fall through */
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
+ {
+ int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0;
+ pvt->pci_tad[id] = pdev;
+ }
break;
- case 17:
- if (func == 4) {
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
+ pvt->pci_ddrio = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
+ if (!mode_2ha)
pvt->pci_ddrio = pdev;
- break;
- } else if (func == 0) {
- if (!mode_2ha)
- pvt->pci_ddrio = pdev;
- break;
- }
- goto error;
- case 22:
- switch (func) {
- case 0:
- pvt->pci_sad0 = pdev;
- break;
- case 1:
- pvt->pci_br0 = pdev;
- break;
- case 2:
- pvt->pci_br1 = pdev;
- break;
- default:
- goto error;
- }
break;
- case 28:
- if (func == 0) {
- pvt->pci_ha1 = pdev;
- break;
- }
- goto error;
- case 29:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
+ pvt->pci_sad0 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
+ pvt->pci_br0 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
+ pvt->pci_br1 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
+ pvt->pci_ha1 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
+ {
+ int id = pdev->device - PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 + 2;
+
/* we shouldn't have this device if we have just one
* HA present */
WARN_ON(!mode_2ha);
- if (func == 2 || func == 3) {
- pvt->pci_tad[func] = pdev;
- break;
- }
- goto error;
+ pvt->pci_tad[id] = pdev;
+ }
+ break;
default:
goto error;
}
@@ -1614,11 +1752,111 @@ enodev:
error:
sbridge_printk(KERN_ERR,
- "Device %d, function %d is out of the expected range\n",
- slot, func);
+ "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
+ pdev->device);
return -EINVAL;
}
+static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
+ struct sbridge_dev *sbridge_dev)
+{
+ struct sbridge_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pdev, *tmp;
+ int i;
+ bool mode_2ha = false;
+
+ tmp = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, NULL);
+ if (tmp) {
+ mode_2ha = true;
+ pci_dev_put(tmp);
+ }
+
+ /* there's only one device per system; not tied to any bus */
+ if (pvt->info.pci_vtd == NULL)
+ /* result will be checked later */
+ pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
+ NULL);
+
+ for (i = 0; i < sbridge_dev->n_devs; i++) {
+ pdev = sbridge_dev->pdev[i];
+ if (!pdev)
+ continue;
+
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
+ pvt->pci_sad0 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
+ pvt->pci_sad1 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
+ pvt->pci_ha0 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
+ pvt->pci_ta = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_THERMAL:
+ pvt->pci_ras = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
+ pvt->pci_tad[0] = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
+ pvt->pci_tad[1] = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
+ if (!mode_2ha)
+ pvt->pci_tad[2] = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
+ if (!mode_2ha)
+ pvt->pci_tad[3] = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
+ pvt->pci_ddrio = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
+ pvt->pci_ha1 = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
+ pvt->pci_ha1_ta = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
+ if (mode_2ha)
+ pvt->pci_tad[2] = pdev;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
+ if (mode_2ha)
+ pvt->pci_tad[3] = pdev;
+ break;
+ default:
+ break;
+ }
+
+ edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
+ sbridge_dev->bus,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pdev);
+ }
+
+ /* Check if everything was registered */
+ if (!pvt->pci_sad0 || !pvt->pci_ha0 || !pvt->pci_sad1 ||
+ !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
+ goto enodev;
+
+ for (i = 0; i < NUM_CHANNELS; i++) {
+ if (!pvt->pci_tad[i])
+ goto enodev;
+ }
+ return 0;
+
+enodev:
+ sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
+ return -ENODEV;
+}
+
/****************************************************************************
Error check routines
****************************************************************************/
@@ -1736,6 +1974,9 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
* EDAC core should be handling the channel mask, in order to point
* to the group of dimm's where the error may be happening.
*/
+ if (!pvt->is_lockstep && !pvt->is_mirrored && !pvt->is_close_pg)
+ channel = first_channel;
+
snprintf(msg, sizeof(msg),
"%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
overflow ? " OVERFLOW" : "",
@@ -1865,10 +2106,6 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
"%u APIC %x\n", mce->cpuvendor, mce->cpuid,
mce->time, mce->socketid, mce->apicid);
- /* Only handle if it is the right mc controller */
- if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
- return NOTIFY_DONE;
-
smp_rmb();
if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
smp_wmb();
@@ -1932,7 +2169,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
int rc;
/* Check the number of active and not disabled channels */
- rc = check_if_ecc_is_active(sbridge_dev->bus);
+ rc = check_if_ecc_is_active(sbridge_dev->bus, type);
if (unlikely(rc < 0))
return rc;
@@ -1971,11 +2208,15 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
mci->edac_check = sbridge_check_error;
pvt->info.type = type;
- if (type == IVY_BRIDGE) {
+ switch (type) {
+ case IVY_BRIDGE:
pvt->info.rankcfgr = IB_RANK_CFG_A;
pvt->info.get_tolm = ibridge_get_tolm;
pvt->info.get_tohm = ibridge_get_tohm;
pvt->info.dram_rule = ibridge_dram_rule;
+ pvt->info.get_memory_type = get_memory_type;
+ pvt->info.get_node_id = get_node_id;
+ pvt->info.rir_limit = rir_limit;
pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
pvt->info.interleave_list = ibridge_interleave_list;
pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
@@ -1986,11 +2227,15 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
rc = ibridge_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
- } else {
+ break;
+ case SANDY_BRIDGE:
pvt->info.rankcfgr = SB_RANK_CFG_A;
pvt->info.get_tolm = sbridge_get_tolm;
pvt->info.get_tohm = sbridge_get_tohm;
pvt->info.dram_rule = sbridge_dram_rule;
+ pvt->info.get_memory_type = get_memory_type;
+ pvt->info.get_node_id = get_node_id;
+ pvt->info.rir_limit = rir_limit;
pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
pvt->info.interleave_list = sbridge_interleave_list;
pvt->info.max_interleave = ARRAY_SIZE(sbridge_interleave_list);
@@ -2001,8 +2246,27 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
rc = sbridge_mci_bind_devs(mci, sbridge_dev);
if (unlikely(rc < 0))
goto fail0;
- }
+ break;
+ case HASWELL:
+ /* rankcfgr isn't used */
+ pvt->info.get_tolm = haswell_get_tolm;
+ pvt->info.get_tohm = haswell_get_tohm;
+ pvt->info.dram_rule = ibridge_dram_rule;
+ pvt->info.get_memory_type = haswell_get_memory_type;
+ pvt->info.get_node_id = haswell_get_node_id;
+ pvt->info.rir_limit = haswell_rir_limit;
+ pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
+ pvt->info.interleave_list = ibridge_interleave_list;
+ pvt->info.max_interleave = ARRAY_SIZE(ibridge_interleave_list);
+ pvt->info.interleave_pkg = ibridge_interleave_pkg;
+ mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell Socket#%d", mci->mc_idx);
+ /* Store pci devices at mci for faster access */
+ rc = haswell_mci_bind_devs(mci, sbridge_dev);
+ if (unlikely(rc < 0))
+ goto fail0;
+ break;
+ }
/* Get dimm basic config and the memory layout */
get_dimm_config(mci);
@@ -2037,10 +2301,10 @@ fail0:
static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- int rc;
+ int rc = -ENODEV;
u8 mc, num_mc = 0;
struct sbridge_dev *sbridge_dev;
- enum type type;
+ enum type type = SANDY_BRIDGE;
/* get the pci devices we want to reserve for our use */
mutex_lock(&sbridge_edac_lock);
@@ -2054,12 +2318,19 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
probed++;
- if (pdev->device == PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA) {
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
type = IVY_BRIDGE;
- } else {
+ break;
+ case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
type = SANDY_BRIDGE;
+ break;
+ case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
+ rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_haswell_table);
+ type = HASWELL;
+ break;
}
if (unlikely(rc < 0))
goto fail0;
@@ -2068,6 +2339,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
edac_dbg(0, "Registering MC#%d (%d of %d)\n",
mc, mc + 1, num_mc);
+
sbridge_dev->mc = mc++;
rc = sbridge_register_mci(sbridge_dev, type);
if (unlikely(rc < 0))
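Much of the per-model handling added to sb_edac above reduces to bitfield arithmetic on configuration registers, e.g. the RIR limit moving from a 10-bit field in bits [10:1] (Sandy/Ivy Bridge) to an 11-bit "limit minus one" field in bits [11:1] on Haswell. A self-contained userspace sketch of that math, with GET_BITFIELD re-implemented to mirror the driver's macro and a made-up register value:

#include <stdint.h>
#include <stdio.h>

/* mirrors the driver's GET_BITFIELD(v, lo, hi) */
#define GET_BITFIELD(v, lo, hi) \
	(((v) & (((1ULL << ((hi) - (lo) + 1)) - 1) << (lo))) >> (lo))

/* Sandy/Ivy Bridge: limit in bits [10:1], 512 MB granularity */
static uint64_t rir_limit(uint32_t reg)
{
	return ((uint64_t)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff;
}

/* Haswell: field widened to bits [11:1] and encodes "limit - 1" */
static uint64_t haswell_rir_limit(uint32_t reg)
{
	return (((uint64_t)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1;
}

int main(void)
{
	uint32_t reg = 0x20;	/* made-up register: limit field == 16 */

	printf("sb/ib RIR limit:   0x%llx\n",
	       (unsigned long long)rir_limit(reg));
	printf("haswell RIR limit: 0x%llx\n",
	       (unsigned long long)haswell_rir_limit(reg));
	return 0;
}
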
diff --git a/drivers/gpu/drm/nouveau/core/core/client.c b/drivers/gpu/drm/nouveau/core/core/client.c
index 10598dede9e9..68bf06768123 100644
--- a/drivers/gpu/drm/nouveau/core/core/client.c
+++ b/drivers/gpu/drm/nouveau/core/core/client.c
@@ -132,12 +132,12 @@ nvkm_client_notify_new(struct nouveau_client *client,
if (ret == 0) {
client->notify[index] = notify;
notify->client = client;
- return 0;
+ return index;
}
}
kfree(notify);
- return 0;
+ return ret;
}
static int
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 54ec53bc6252..cdf9147f32a1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -163,6 +163,7 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = gk20a_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
device->oclass[NVDEV_SUBDEV_IBUS ] = &gk20a_ibus_oclass;
device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index db19191176fa..30fd1dc64f93 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -68,6 +68,9 @@ nvc0_graph_zbc_color_get(struct nvc0_graph_priv *priv, int format,
}
}
+ if (zbc < 0)
+ return zbc;
+
memcpy(priv->zbc_color[zbc].ds, ds, sizeof(priv->zbc_color[zbc].ds));
memcpy(priv->zbc_color[zbc].l2, l2, sizeof(priv->zbc_color[zbc].l2));
priv->zbc_color[zbc].format = format;
@@ -109,6 +112,9 @@ nvc0_graph_zbc_depth_get(struct nvc0_graph_priv *priv, int format,
}
}
+ if (zbc < 0)
+ return zbc;
+
priv->zbc_depth[zbc].format = format;
priv->zbc_depth[zbc].ds = ds;
priv->zbc_depth[zbc].l2 = l2;
diff --git a/drivers/gpu/drm/nouveau/core/include/core/client.h b/drivers/gpu/drm/nouveau/core/include/core/client.h
index 4fc6ab12382d..1794a05205d8 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/client.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/client.h
@@ -14,7 +14,7 @@ struct nouveau_client {
void *data;
int (*ntfy)(const void *, u32, const void *, u32);
- struct nvkm_client_notify *notify[8];
+ struct nvkm_client_notify *notify[16];
};
static inline struct nouveau_client *
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
index 73b1ed20c8d5..8bcbdf39cfb2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bar/base.c
@@ -99,8 +99,13 @@ nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent,
struct nouveau_mem *mem, struct nouveau_object **pobject)
{
struct nouveau_object *engine = nv_object(bar);
- return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass,
- mem, 0, pobject);
+ int ret = -ENOMEM;
+ if (bar->iomem) {
+ ret = nouveau_object_ctor(parent, engine,
+ &nouveau_barobj_oclass,
+ mem, 0, pobject);
+ }
+ return ret;
}
int
@@ -118,9 +123,12 @@ nouveau_bar_create_(struct nouveau_object *parent,
if (ret)
return ret;
- if (nv_device_resource_len(device, 3) != 0)
+ if (nv_device_resource_len(device, 3) != 0) {
bar->iomem = ioremap(nv_device_resource_start(device, 3),
nv_device_resource_len(device, 3));
+ if (!bar->iomem)
+ nv_warn(bar, "PRAMIN ioremap failed\n");
+ }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 946518572346..2b284b192763 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -554,13 +554,13 @@ nvc0_ram_create_(struct nouveau_object *parent, struct nouveau_object *engine,
} else {
/* otherwise, address lowest common amount from 0GiB */
ret = nouveau_mm_init(&pfb->vram, rsvd_head,
- (bsize << 8) * parts, 1);
+ (bsize << 8) * parts - rsvd_head, 1);
if (ret)
return ret;
/* and the rest starting from (8GiB + common_size) */
offset = (0x0200000000ULL >> 12) + (bsize << 8);
- length = (ram->size >> 12) - (bsize << 8) - rsvd_tail;
+ length = (ram->size >> 12) - ((bsize * parts) << 8) - rsvd_tail;
ret = nouveau_mm_init(&pfb->vram, offset, length, 0);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
index 9e00a1ede120..b54b582e72c4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltc/gf100.c
@@ -156,7 +156,7 @@ gf100_ltc_init_tag_ram(struct nouveau_fb *pfb, struct nvkm_ltc_priv *priv)
if (ret) {
priv->num_tags = 0;
} else {
- u64 tag_base = (priv->tag_ram->offset << 12) + tag_margin;
+ u64 tag_base = ((u64)priv->tag_ram->offset << 12) + tag_margin;
tag_base += tag_align - 1;
ret = do_div(tag_base, tag_align);
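The hunk above casts the offset to u64 before shifting; without the cast the shift is evaluated at 32-bit width and the upper bits of the tag base are silently lost for large VRAM offsets. A tiny standalone illustration with a made-up offset value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int offset = 0x00300000;	/* made-up allocation offset */

	uint64_t wrong = offset << 12;		 /* 32-bit shift wraps to 0 */
	uint64_t right = (uint64_t)offset << 12; /* what the fix computes */

	printf("wrong: 0x%llx\n", (unsigned long long)wrong);
	printf("right: 0x%llx\n", (unsigned long long)right);
	return 0;
}
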
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index da5d631aa5b9..01da508625f2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1228,7 +1228,6 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nouveau_mem *node = mem->mm_node;
- struct drm_device *dev = drm->dev;
int ret;
mem->bus.addr = NULL;
@@ -1247,7 +1246,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
if (drm->agp.stat == ENABLED) {
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = drm->agp.base;
- mem->bus.is_iomem = !dev->agp->cant_use_aperture;
+ mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
}
#endif
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 1cc7b603c753..65b4fd53dd4e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -592,7 +592,9 @@ nouveau_display_repin(struct drm_device *dev)
if (!nouveau_fb || !nouveau_fb->nvbo)
continue;
- nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ NV_ERROR(drm, "Could not pin framebuffer\n");
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index ebfe3180109e..8bdd27091db8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -226,7 +226,7 @@ nouveau_fbcon_accel_restore(struct drm_device *dev)
}
}
-void
+static void
nouveau_fbcon_accel_fini(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -246,7 +246,7 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
}
}
-void
+static void
nouveau_fbcon_accel_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 0ffeb50d0088..246a824c16ca 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -149,7 +149,8 @@ power_down:
static int nouveau_platform_remove(struct platform_device *pdev)
{
struct drm_device *drm_dev = platform_get_drvdata(pdev);
- struct nouveau_device *device = nouveau_dev(drm_dev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct nouveau_device *device = nvkm_device(&drm->device);
struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
nouveau_drm_device_remove(drm_dev);
diff --git a/drivers/gpu/drm/nouveau/nvif/class.h b/drivers/gpu/drm/nouveau/nvif/class.h
index cc81e0e5fd30..573491f84792 100644
--- a/drivers/gpu/drm/nouveau/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/nvif/class.h
@@ -428,8 +428,8 @@ struct nv50_disp_dac_pwr_v0 {
struct nv50_disp_dac_load_v0 {
__u8 version;
__u8 load;
- __u16 data;
- __u8 pad04[4];
+ __u8 pad02[2];
+ __u32 data;
};
struct nv50_disp_sor_pwr_v0 {
diff --git a/drivers/gpu/drm/nouveau/nvif/notify.c b/drivers/gpu/drm/nouveau/nvif/notify.c
index 7c06123a559c..0898c3155292 100644
--- a/drivers/gpu/drm/nouveau/nvif/notify.c
+++ b/drivers/gpu/drm/nouveau/nvif/notify.c
@@ -87,12 +87,25 @@ nvif_notify_get(struct nvif_notify *notify)
return 0;
}
+static inline int
+nvif_notify_func(struct nvif_notify *notify, bool keep)
+{
+ int ret = notify->func(notify);
+ if (ret == NVIF_NOTIFY_KEEP ||
+ !test_and_clear_bit(NVKM_NOTIFY_USER, &notify->flags)) {
+ if (!keep)
+ atomic_dec(&notify->putcnt);
+ else
+ nvif_notify_get_(notify);
+ }
+ return ret;
+}
+
static void
nvif_notify_work(struct work_struct *work)
{
struct nvif_notify *notify = container_of(work, typeof(*notify), work);
- if (notify->func(notify) == NVIF_NOTIFY_KEEP)
- nvif_notify_get_(notify);
+ nvif_notify_func(notify, true);
}
int
@@ -113,19 +126,15 @@ nvif_notify(const void *header, u32 length, const void *data, u32 size)
if (!WARN_ON(notify == NULL)) {
struct nvif_client *client = nvif_client(notify->object);
if (!WARN_ON(notify->size != size)) {
+ atomic_inc(&notify->putcnt);
if (test_bit(NVIF_NOTIFY_WORK, &notify->flags)) {
- atomic_inc(&notify->putcnt);
memcpy((void *)notify->data, data, size);
schedule_work(&notify->work);
return NVIF_NOTIFY_DROP;
}
notify->data = data;
- ret = notify->func(notify);
+ ret = nvif_notify_func(notify, client->driver->keep);
notify->data = NULL;
- if (ret != NVIF_NOTIFY_DROP && client->driver->keep) {
- atomic_inc(&notify->putcnt);
- nvif_notify_get_(notify);
- }
}
}
@@ -228,8 +237,10 @@ nvif_notify_new(struct nvif_object *object, int (*func)(struct nvif_notify *),
if (notify) {
int ret = nvif_notify_init(object, nvif_notify_del, func, work,
type, data, size, reply, notify);
- if (ret)
+ if (ret) {
kfree(notify);
+ notify = NULL;
+ }
*pnotify = notify;
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/object.c b/drivers/gpu/drm/nouveau/nvif/object.c
index b0c82206ece2..dd85b56f6aa5 100644
--- a/drivers/gpu/drm/nouveau/nvif/object.c
+++ b/drivers/gpu/drm/nouveau/nvif/object.c
@@ -275,8 +275,10 @@ nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass,
if (object) {
int ret = nvif_object_init(parent, nvif_object_del, handle,
oclass, data, size, object);
- if (ret)
+ if (ret) {
kfree(object);
+ object = NULL;
+ }
*pobject = object;
return ret;
}
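Both constructor fixes above (nvif_notify_new() and nvif_object_new()) now clear the caller's pointer in addition to freeing the allocation when initialization fails. A hypothetical caller sketch of the pattern this protects; the handle value and function names are made up.

/* assumes <nvif/object.h>; all names here are illustrative */
static struct nvif_object *example_obj;	/* NULL until created */

static int example_create(struct nvif_object *parent, u32 oclass)
{
	int ret = nvif_object_new(parent, 0xcafe0001, oclass,
				  NULL, 0, &example_obj);
	/*
	 * On failure example_obj is now guaranteed to be NULL, so a
	 * shared teardown path that tests "if (example_obj)" can no
	 * longer dereference or double-free the stale allocation.
	 */
	return ret;
}
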
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 3e388ec31da8..f0db7eca9023 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1416,6 +1416,7 @@ static void wacom_remove(struct hid_device *hdev)
kfree(wacom);
}
+#ifdef CONFIG_PM
static int wacom_resume(struct hid_device *hdev)
{
struct wacom *wacom = hid_get_drvdata(hdev);
@@ -1436,6 +1437,7 @@ static int wacom_reset_resume(struct hid_device *hdev)
{
return wacom_resume(hdev);
}
+#endif /* CONFIG_PM */
static struct hid_driver wacom_driver = {
.name = "wacom",
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index d398f1321f14..c30204f2fa30 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -237,6 +237,31 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count)
EXPORT_SYMBOL(input_mt_report_pointer_emulation);
/**
+ * input_mt_drop_unused() - Inactivate slots not seen in this frame
+ * @dev: input device with allocated MT slots
+ *
+ * Lift all slots not seen since the last call to this function.
+ */
+void input_mt_drop_unused(struct input_dev *dev)
+{
+ struct input_mt *mt = dev->mt;
+ int i;
+
+ if (!mt)
+ return;
+
+ for (i = 0; i < mt->num_slots; i++) {
+ if (!input_mt_is_used(mt, &mt->slots[i])) {
+ input_mt_slot(dev, i);
+ input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
+ }
+ }
+
+ mt->frame++;
+}
+EXPORT_SYMBOL(input_mt_drop_unused);
+
+/**
* input_mt_sync_frame() - synchronize mt frame
* @dev: input device with allocated MT slots
*
@@ -247,27 +272,18 @@ EXPORT_SYMBOL(input_mt_report_pointer_emulation);
void input_mt_sync_frame(struct input_dev *dev)
{
struct input_mt *mt = dev->mt;
- struct input_mt_slot *s;
bool use_count = false;
if (!mt)
return;
- if (mt->flags & INPUT_MT_DROP_UNUSED) {
- for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
- if (input_mt_is_used(mt, s))
- continue;
- input_mt_slot(dev, s - mt->slots);
- input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
- }
- }
+ if (mt->flags & INPUT_MT_DROP_UNUSED)
+ input_mt_drop_unused(dev);
if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT))
use_count = true;
input_mt_report_pointer_emulation(dev, use_count);
-
- mt->frame++;
}
EXPORT_SYMBOL(input_mt_sync_frame);
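input_mt_drop_unused() is now exported on its own, so drivers that cannot go through input_mt_sync_frame(), or that need extra per-frame work, can still expire slots that were not touched this frame. A hedged sketch of the usual per-frame reporting sequence; the contact structure and coordinates are illustrative.

#include <linux/input/mt.h>

struct example_contact {
	int slot, x, y;
};

static void example_report_frame(struct input_dev *dev,
				 const struct example_contact *c, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		input_mt_slot(dev, c[i].slot);
		input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
		input_report_abs(dev, ABS_MT_POSITION_X, c[i].x);
		input_report_abs(dev, ABS_MT_POSITION_Y, c[i].y);
	}

	/* expires untouched slots when INPUT_MT_DROP_UNUSED is set;
	 * input_mt_drop_unused(dev) can be called directly instead */
	input_mt_sync_frame(dev);
	input_sync(dev);
}
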
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c
index 9135606c8649..ab0fdcd36e18 100644
--- a/drivers/input/joystick/analog.c
+++ b/drivers/input/joystick/analog.c
@@ -158,7 +158,7 @@ static unsigned int get_time_pit(void)
#define GET_TIME(x) rdtscl(x)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
-#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_TILE)
+#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "get_cycles"
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 603fe0dd3682..177602cf7079 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -95,7 +95,8 @@
#define XTYPE_XBOX 0
#define XTYPE_XBOX360 1
#define XTYPE_XBOX360W 2
-#define XTYPE_UNKNOWN 3
+#define XTYPE_XBOXONE 3
+#define XTYPE_UNKNOWN 4
static bool dpad_to_buttons;
module_param(dpad_to_buttons, bool, S_IRUGO);
@@ -121,6 +122,7 @@ static const struct xpad_device {
{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
{ 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
+ { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -231,10 +233,12 @@ static const signed short xpad_abs_triggers[] = {
-1
};
-/* Xbox 360 has a vendor-specific class, so we cannot match it with only
+/*
+ * Xbox 360 has a vendor-specific class, so we cannot match it with only
* USB_INTERFACE_INFO (also specifically refused by USB subsystem), so we
* match against vendor id as well. Wired Xbox 360 devices have protocol 1,
- * wireless controllers have protocol 129. */
+ * wireless controllers have protocol 129.
+ */
#define XPAD_XBOX360_VENDOR_PROTOCOL(vend,pr) \
.match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \
.idVendor = (vend), \
@@ -245,9 +249,20 @@ static const signed short xpad_abs_triggers[] = {
{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \
{ XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) }
+/* The Xbox One controller uses subclass 71 and protocol 208. */
+#define XPAD_XBOXONE_VENDOR_PROTOCOL(vend, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \
+ .idVendor = (vend), \
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC, \
+ .bInterfaceSubClass = 71, \
+ .bInterfaceProtocol = (pr)
+#define XPAD_XBOXONE_VENDOR(vend) \
+ { XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) }
+
static struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
@@ -278,12 +293,10 @@ struct usb_xpad {
struct urb *bulk_out;
unsigned char *bdata;
-#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
struct urb *irq_out; /* urb for interrupt out report */
unsigned char *odata; /* output data */
dma_addr_t odata_dma;
struct mutex odata_mutex;
-#endif
#if defined(CONFIG_JOYSTICK_XPAD_LEDS)
struct xpad_led *led;
@@ -470,6 +483,105 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha
xpad360_process_packet(xpad, cmd, &data[4]);
}
+/*
+ * xpadone_process_buttons
+ *
+ * Process a button update packet from an Xbox One controller.
+ */
+static void xpadone_process_buttons(struct usb_xpad *xpad,
+ struct input_dev *dev,
+ unsigned char *data)
+{
+ /* menu/view buttons */
+ input_report_key(dev, BTN_START, data[4] & 0x04);
+ input_report_key(dev, BTN_SELECT, data[4] & 0x08);
+
+ /* buttons A,B,X,Y */
+ input_report_key(dev, BTN_A, data[4] & 0x10);
+ input_report_key(dev, BTN_B, data[4] & 0x20);
+ input_report_key(dev, BTN_X, data[4] & 0x40);
+ input_report_key(dev, BTN_Y, data[4] & 0x80);
+
+ /* digital pad */
+ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
+ /* dpad as buttons (left, right, up, down) */
+ input_report_key(dev, BTN_TRIGGER_HAPPY1, data[5] & 0x04);
+ input_report_key(dev, BTN_TRIGGER_HAPPY2, data[5] & 0x08);
+ input_report_key(dev, BTN_TRIGGER_HAPPY3, data[5] & 0x01);
+ input_report_key(dev, BTN_TRIGGER_HAPPY4, data[5] & 0x02);
+ } else {
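+		/* dpad as hat axes: X = right (0x08) - left (0x04), Y = down (0x02) - up (0x01) */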
+ input_report_abs(dev, ABS_HAT0X,
+ !!(data[5] & 0x08) - !!(data[5] & 0x04));
+ input_report_abs(dev, ABS_HAT0Y,
+ !!(data[5] & 0x02) - !!(data[5] & 0x01));
+ }
+
+ /* TL/TR */
+ input_report_key(dev, BTN_TL, data[5] & 0x10);
+ input_report_key(dev, BTN_TR, data[5] & 0x20);
+
+ /* stick press left/right */
+ input_report_key(dev, BTN_THUMBL, data[5] & 0x40);
+ input_report_key(dev, BTN_THUMBR, data[5] & 0x80);
+
+ if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
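+		/* stick values are little-endian s16; Y is bitwise-inverted to flip the axis */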
+ /* left stick */
+ input_report_abs(dev, ABS_X,
+ (__s16) le16_to_cpup((__le16 *)(data + 10)));
+ input_report_abs(dev, ABS_Y,
+ ~(__s16) le16_to_cpup((__le16 *)(data + 12)));
+
+ /* right stick */
+ input_report_abs(dev, ABS_RX,
+ (__s16) le16_to_cpup((__le16 *)(data + 14)));
+ input_report_abs(dev, ABS_RY,
+ ~(__s16) le16_to_cpup((__le16 *)(data + 16)));
+ }
+
+ /* triggers left/right */
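+	/* Xbox One triggers report 10-bit values (0-1023), see xpad_set_up_abs() */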
+ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
+ input_report_key(dev, BTN_TL2,
+ (__u16) le16_to_cpup((__le16 *)(data + 6)));
+ input_report_key(dev, BTN_TR2,
+ (__u16) le16_to_cpup((__le16 *)(data + 8)));
+ } else {
+ input_report_abs(dev, ABS_Z,
+ (__u16) le16_to_cpup((__le16 *)(data + 6)));
+ input_report_abs(dev, ABS_RZ,
+ (__u16) le16_to_cpup((__le16 *)(data + 8)));
+ }
+
+ input_sync(dev);
+}
+
+/*
+ * xpadone_process_packet
+ *
+ * Completes a request by converting the data into events for the
+ * input subsystem. This version is for the Xbox One controller.
+ *
+ * The report format was gleaned from
+ * https://github.com/kylelemons/xbox/blob/master/xbox.go
+ */
+
+static void xpadone_process_packet(struct usb_xpad *xpad,
+ u16 cmd, unsigned char *data)
+{
+ struct input_dev *dev = xpad->dev;
+
+ switch (data[0]) {
+ case 0x20:
+ xpadone_process_buttons(xpad, dev, data);
+ break;
+
+ case 0x07:
+ /* the xbox button has its own special report */
+ input_report_key(dev, BTN_MODE, data[4] & 0x01);
+ input_sync(dev);
+ break;
+ }
+}
+
static void xpad_irq_in(struct urb *urb)
{
struct usb_xpad *xpad = urb->context;
@@ -502,6 +614,9 @@ static void xpad_irq_in(struct urb *urb)
case XTYPE_XBOX360W:
xpad360w_process_packet(xpad, 0, xpad->idata);
break;
+ case XTYPE_XBOXONE:
+ xpadone_process_packet(xpad, 0, xpad->idata);
+ break;
default:
xpad_process_packet(xpad, 0, xpad->idata);
}
@@ -535,7 +650,6 @@ static void xpad_bulk_out(struct urb *urb)
}
}
-#if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
static void xpad_irq_out(struct urb *urb)
{
struct usb_xpad *xpad = urb->context;
@@ -573,6 +687,7 @@ exit:
static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
{
struct usb_endpoint_descriptor *ep_irq_out;
+ int ep_irq_out_idx;
int error;
if (xpad->xtype == XTYPE_UNKNOWN)
@@ -593,7 +708,10 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
goto fail2;
}
- ep_irq_out = &intf->cur_altsetting->endpoint[1].desc;
+ /* Xbox One controller has in/out endpoints swapped. */
+ ep_irq_out_idx = xpad->xtype == XTYPE_XBOXONE ? 0 : 1;
+ ep_irq_out = &intf->cur_altsetting->endpoint[ep_irq_out_idx].desc;
+
usb_fill_int_urb(xpad->irq_out, xpad->udev,
usb_sndintpipe(xpad->udev, ep_irq_out->bEndpointAddress),
xpad->odata, XPAD_PKT_LEN,
@@ -621,11 +739,6 @@ static void xpad_deinit_output(struct usb_xpad *xpad)
xpad->odata, xpad->odata_dma);
}
}
-#else
-static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) { return 0; }
-static void xpad_deinit_output(struct usb_xpad *xpad) {}
-static void xpad_stop_output(struct usb_xpad *xpad) {}
-#endif
#ifdef CONFIG_JOYSTICK_XPAD_FF
static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
@@ -692,7 +805,7 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
static int xpad_init_ff(struct usb_xpad *xpad)
{
- if (xpad->xtype == XTYPE_UNKNOWN)
+ if (xpad->xtype == XTYPE_UNKNOWN || xpad->xtype == XTYPE_XBOXONE)
return 0;
input_set_capability(xpad->dev, EV_FF, FF_RUMBLE);
@@ -801,6 +914,14 @@ static int xpad_open(struct input_dev *dev)
if (usb_submit_urb(xpad->irq_in, GFP_KERNEL))
return -EIO;
+ if (xpad->xtype == XTYPE_XBOXONE) {
+		/* Xbox One controller needs to be initialized. */
+ xpad->odata[0] = 0x05;
+ xpad->odata[1] = 0x20;
+ xpad->irq_out->transfer_buffer_length = 2;
+ return usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+ }
+
return 0;
}
@@ -816,6 +937,7 @@ static void xpad_close(struct input_dev *dev)
static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
{
+ struct usb_xpad *xpad = input_get_drvdata(input_dev);
set_bit(abs, input_dev->absbit);
switch (abs) {
@@ -827,7 +949,10 @@ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
break;
case ABS_Z:
case ABS_RZ: /* the triggers (if mapped to axes) */
- input_set_abs_params(input_dev, abs, 0, 255, 0, 0);
+ if (xpad->xtype == XTYPE_XBOXONE)
+ input_set_abs_params(input_dev, abs, 0, 1023, 0, 0);
+ else
+ input_set_abs_params(input_dev, abs, 0, 255, 0, 0);
break;
case ABS_HAT0X:
 	case ABS_HAT0Y:	/* the d-pad (only if dpad is mapped to axes) */
@@ -842,6 +967,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
struct usb_xpad *xpad;
struct input_dev *input_dev;
struct usb_endpoint_descriptor *ep_irq_in;
+ int ep_irq_in_idx;
int i, error;
for (i = 0; xpad_device[i].idVendor; i++) {
@@ -850,6 +976,16 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
break;
}
+ if (xpad_device[i].xtype == XTYPE_XBOXONE &&
+ intf->cur_altsetting->desc.bInterfaceNumber != 0) {
+ /*
+ * The Xbox One controller lists three interfaces all with the
+ * same interface class, subclass and protocol. Differentiate by
+ * interface number.
+ */
+ return -ENODEV;
+ }
+
xpad = kzalloc(sizeof(struct usb_xpad), GFP_KERNEL);
input_dev = input_allocate_device();
if (!xpad || !input_dev) {
@@ -920,7 +1056,8 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
__set_bit(xpad_common_btn[i], input_dev->keybit);
/* set up model-specific ones */
- if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W) {
+ if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W ||
+ xpad->xtype == XTYPE_XBOXONE) {
for (i = 0; xpad360_btn[i] >= 0; i++)
__set_bit(xpad360_btn[i], input_dev->keybit);
} else {
@@ -933,7 +1070,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
__set_bit(xpad_btn_pad[i], input_dev->keybit);
} else {
for (i = 0; xpad_abs_pad[i] >= 0; i++)
- xpad_set_up_abs(input_dev, xpad_abs_pad[i]);
+ xpad_set_up_abs(input_dev, xpad_abs_pad[i]);
}
if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
@@ -956,7 +1093,10 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
if (error)
goto fail5;
- ep_irq_in = &intf->cur_altsetting->endpoint[0].desc;
+ /* Xbox One controller has in/out endpoints swapped. */
+ ep_irq_in_idx = xpad->xtype == XTYPE_XBOXONE ? 1 : 0;
+ ep_irq_in = &intf->cur_altsetting->endpoint[ep_irq_in_idx].desc;
+
usb_fill_int_urb(xpad->irq_in, udev,
usb_rcvintpipe(udev, ep_irq_in->bEndpointAddress),
xpad->idata, XPAD_PKT_LEN, xpad_irq_in,
diff --git a/drivers/input/keyboard/cap1106.c b/drivers/input/keyboard/cap1106.c
index f7d7a0d4ab4e..180b184ab90f 100644
--- a/drivers/input/keyboard/cap1106.c
+++ b/drivers/input/keyboard/cap1106.c
@@ -64,7 +64,7 @@ struct cap1106_priv {
struct input_dev *idev;
/* config */
- unsigned int keycodes[CAP1106_NUM_CHN];
+ unsigned short keycodes[CAP1106_NUM_CHN];
};
static const struct reg_default cap1106_reg_defaults[] = {
@@ -272,6 +272,12 @@ static int cap1106_i2c_probe(struct i2c_client *i2c_client,
for (i = 0; i < CAP1106_NUM_CHN; i++)
__set_bit(priv->keycodes[i], priv->idev->keybit);
+ __clear_bit(KEY_RESERVED, priv->idev->keybit);
+
+ priv->idev->keycode = priv->keycodes;
+ priv->idev->keycodesize = sizeof(priv->keycodes[0]);
+ priv->idev->keycodemax = ARRAY_SIZE(priv->keycodes);
+
priv->idev->id.vendor = CAP1106_MANUFACTURER_ID;
priv->idev->id.product = CAP1106_PRODUCT_ID;
priv->idev->id.version = rev;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index ef9e0b8a9aa7..e8573c68f77e 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -117,6 +117,9 @@ void synaptics_reset(struct psmouse *psmouse)
}
#ifdef CONFIG_MOUSE_PS2_SYNAPTICS
+
+static bool cr48_profile_sensor;
+
struct min_max_quirk {
const char * const *pnp_ids;
int x_min, x_max, y_min, y_max;
@@ -1152,6 +1155,42 @@ static void synaptics_image_sensor_process(struct psmouse *psmouse,
priv->agm_pending = false;
}
+static void synaptics_profile_sensor_process(struct psmouse *psmouse,
+ struct synaptics_hw_state *sgm,
+ int num_fingers)
+{
+ struct input_dev *dev = psmouse->dev;
+ struct synaptics_data *priv = psmouse->private;
+ struct synaptics_hw_state *hw[2] = { sgm, &priv->agm };
+ struct input_mt_pos pos[2];
+ int slot[2], nsemi, i;
+
+ nsemi = clamp_val(num_fingers, 0, 2);
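+	/* the profile sensor supplies at most two positions: SGM plus the latest AGM */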
+
+ for (i = 0; i < nsemi; i++) {
+ pos[i].x = hw[i]->x;
+ pos[i].y = synaptics_invert_y(hw[i]->y);
+ }
+
+ input_mt_assign_slots(dev, slot, pos, nsemi);
+
+ for (i = 0; i < nsemi; i++) {
+ input_mt_slot(dev, slot[i]);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
+ input_report_abs(dev, ABS_MT_POSITION_X, pos[i].x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, pos[i].y);
+ input_report_abs(dev, ABS_MT_PRESSURE, hw[i]->z);
+ }
+
+ input_mt_drop_unused(dev);
+ input_mt_report_pointer_emulation(dev, false);
+ input_mt_report_finger_count(dev, num_fingers);
+
+ synaptics_report_buttons(psmouse, sgm);
+
+ input_sync(dev);
+}
+
/*
* called for each full received packet from the touchpad
*/
@@ -1215,6 +1254,11 @@ static void synaptics_process_packet(struct psmouse *psmouse)
finger_width = 0;
}
+ if (cr48_profile_sensor) {
+ synaptics_profile_sensor_process(psmouse, &hw, num_fingers);
+ return;
+ }
+
if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
synaptics_report_semi_mt_data(dev, &hw, &priv->agm,
num_fingers);
@@ -1360,6 +1404,9 @@ static void set_input_params(struct psmouse *psmouse,
set_abs_position_params(dev, priv, ABS_X, ABS_Y);
input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
+ if (cr48_profile_sensor)
+ input_set_abs_params(dev, ABS_MT_PRESSURE, 0, 255, 0, 0);
+
if (SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) {
set_abs_position_params(dev, priv, ABS_MT_POSITION_X,
ABS_MT_POSITION_Y);
@@ -1371,11 +1418,16 @@ static void set_input_params(struct psmouse *psmouse,
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
__set_bit(BTN_TOOL_QUINTTAP, dev->keybit);
} else if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) {
- /* Non-image sensors with AGM use semi-mt */
- __set_bit(INPUT_PROP_SEMI_MT, dev->propbit);
- input_mt_init_slots(dev, 2, 0);
set_abs_position_params(dev, priv, ABS_MT_POSITION_X,
ABS_MT_POSITION_Y);
+ /*
+	 * The profile sensor in the CR-48 tracks contacts reasonably well;
+	 * other non-image sensors with AGM use semi-mt.
+ */
+ input_mt_init_slots(dev, 2,
+ INPUT_MT_POINTER |
+ (cr48_profile_sensor ?
+ INPUT_MT_TRACK : INPUT_MT_SEMI_MT));
}
if (SYN_CAP_PALMDETECT(priv->capabilities))
@@ -1577,10 +1629,24 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
{ }
};
+static const struct dmi_system_id __initconst cr48_dmi_table[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ /* Cr-48 Chromebook (Codename Mario) */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IEC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+ },
+ },
+#endif
+ { }
+};
+
void __init synaptics_module_init(void)
{
impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
+ cr48_profile_sensor = dmi_check_system(cr48_dmi_table);
}
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 03b85711cb70..db178ed2b47e 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -359,7 +359,6 @@ static int mxt_bootloader_read(struct mxt_data *data,
msg.buf = val;
ret = i2c_transfer(data->client->adapter, &msg, 1);
-
if (ret == 1) {
ret = 0;
} else {
@@ -414,6 +413,7 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
case 0x5b:
bootloader = appmode - 0x26;
break;
+
default:
dev_err(&data->client->dev,
"Appmode i2c address 0x%02x not found\n",
@@ -425,20 +425,20 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
return 0;
}
-static int mxt_probe_bootloader(struct mxt_data *data, bool retry)
+static int mxt_probe_bootloader(struct mxt_data *data, bool alt_address)
{
struct device *dev = &data->client->dev;
- int ret;
+ int error;
u8 val;
bool crc_failure;
- ret = mxt_lookup_bootloader_address(data, retry);
- if (ret)
- return ret;
+ error = mxt_lookup_bootloader_address(data, alt_address);
+ if (error)
+ return error;
- ret = mxt_bootloader_read(data, &val, 1);
- if (ret)
- return ret;
+ error = mxt_bootloader_read(data, &val, 1);
+ if (error)
+ return error;
/* Check app crc fail mode */
crc_failure = (val & ~MXT_BOOT_STATUS_MASK) == MXT_APP_CRC_FAIL;
@@ -1064,6 +1064,137 @@ static u32 mxt_calculate_crc(u8 *base, off_t start_off, off_t end_off)
return crc;
}
+static int mxt_prepare_cfg_mem(struct mxt_data *data,
+ const struct firmware *cfg,
+ unsigned int data_pos,
+ unsigned int cfg_start_ofs,
+ u8 *config_mem,
+ size_t config_mem_size)
+{
+ struct device *dev = &data->client->dev;
+ struct mxt_object *object;
+ unsigned int type, instance, size, byte_offset;
+ int offset;
+ int ret;
+ int i;
+ u16 reg;
+ u8 val;
+
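+	/* each config record is "type instance size" followed by <size> hex byte values */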
+ while (data_pos < cfg->size) {
+ /* Read type, instance, length */
+ ret = sscanf(cfg->data + data_pos, "%x %x %x%n",
+ &type, &instance, &size, &offset);
+ if (ret == 0) {
+ /* EOF */
+ break;
+ } else if (ret != 3) {
+ dev_err(dev, "Bad format: failed to parse object\n");
+ return -EINVAL;
+ }
+ data_pos += offset;
+
+ object = mxt_get_object(data, type);
+ if (!object) {
+ /* Skip object */
+ for (i = 0; i < size; i++) {
+ ret = sscanf(cfg->data + data_pos, "%hhx%n",
+ &val, &offset);
+ if (ret != 1) {
+ dev_err(dev, "Bad format in T%d at %d\n",
+ type, i);
+ return -EINVAL;
+ }
+ data_pos += offset;
+ }
+ continue;
+ }
+
+ if (size > mxt_obj_size(object)) {
+ /*
+ * Either we are in fallback mode due to wrong
+ * config or config from a later fw version,
+ * or the file is corrupt or hand-edited.
+ */
+ dev_warn(dev, "Discarding %zu byte(s) in T%u\n",
+ size - mxt_obj_size(object), type);
+ } else if (mxt_obj_size(object) > size) {
+ /*
+ * If firmware is upgraded, new bytes may be added to
+ * end of objects. It is generally forward compatible
+ * to zero these bytes - previous behaviour will be
+ * retained. However this does invalidate the CRC and
+ * will force fallback mode until the configuration is
+ * updated. We warn here but do nothing else - the
+ * malloc has zeroed the entire configuration.
+ */
+ dev_warn(dev, "Zeroing %zu byte(s) in T%d\n",
+ mxt_obj_size(object) - size, type);
+ }
+
+ if (instance >= mxt_obj_instances(object)) {
+ dev_err(dev, "Object instances exceeded!\n");
+ return -EINVAL;
+ }
+
+ reg = object->start_address + mxt_obj_size(object) * instance;
+
+ for (i = 0; i < size; i++) {
+ ret = sscanf(cfg->data + data_pos, "%hhx%n",
+ &val,
+ &offset);
+ if (ret != 1) {
+ dev_err(dev, "Bad format in T%d at %d\n",
+ type, i);
+ return -EINVAL;
+ }
+ data_pos += offset;
+
+ if (i > mxt_obj_size(object))
+ continue;
+
+ byte_offset = reg + i - cfg_start_ofs;
+
+ if (byte_offset >= 0 && byte_offset < config_mem_size) {
+ *(config_mem + byte_offset) = val;
+ } else {
+ dev_err(dev, "Bad object: reg:%d, T%d, ofs=%d\n",
+ reg, object->type, byte_offset);
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int mxt_upload_cfg_mem(struct mxt_data *data, unsigned int cfg_start,
+ u8 *config_mem, size_t config_mem_size)
+{
+ unsigned int byte_offset = 0;
+ int error;
+
+ /* Write configuration as blocks */
+ while (byte_offset < config_mem_size) {
+ unsigned int size = config_mem_size - byte_offset;
+
+ if (size > MXT_MAX_BLOCK_WRITE)
+ size = MXT_MAX_BLOCK_WRITE;
+
+ error = __mxt_write_reg(data->client,
+ cfg_start + byte_offset,
+ size, config_mem + byte_offset);
+ if (error) {
+ dev_err(&data->client->dev,
+ "Config write error, ret=%d\n", error);
+ return error;
+ }
+
+ byte_offset += size;
+ }
+
+ return 0;
+}
+
/*
* mxt_update_cfg - download configuration to chip
*
@@ -1087,26 +1218,20 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
{
struct device *dev = &data->client->dev;
struct mxt_info cfg_info;
- struct mxt_object *object;
int ret;
int offset;
int data_pos;
- int byte_offset;
int i;
int cfg_start_ofs;
u32 info_crc, config_crc, calculated_crc;
u8 *config_mem;
size_t config_mem_size;
- unsigned int type, instance, size;
- u8 val;
- u16 reg;
mxt_update_crc(data, MXT_COMMAND_REPORTALL, 1);
if (strncmp(cfg->data, MXT_CFG_MAGIC, strlen(MXT_CFG_MAGIC))) {
dev_err(dev, "Unrecognised config file\n");
- ret = -EINVAL;
- goto release;
+ return -EINVAL;
}
data_pos = strlen(MXT_CFG_MAGIC);
@@ -1118,8 +1243,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
&offset);
if (ret != 1) {
dev_err(dev, "Bad format\n");
- ret = -EINVAL;
- goto release;
+ return -EINVAL;
}
data_pos += offset;
@@ -1127,30 +1251,26 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
if (cfg_info.family_id != data->info.family_id) {
dev_err(dev, "Family ID mismatch!\n");
- ret = -EINVAL;
- goto release;
+ return -EINVAL;
}
if (cfg_info.variant_id != data->info.variant_id) {
dev_err(dev, "Variant ID mismatch!\n");
- ret = -EINVAL;
- goto release;
+ return -EINVAL;
}
/* Read CRCs */
ret = sscanf(cfg->data + data_pos, "%x%n", &info_crc, &offset);
if (ret != 1) {
dev_err(dev, "Bad format: failed to parse Info CRC\n");
- ret = -EINVAL;
- goto release;
+ return -EINVAL;
}
data_pos += offset;
ret = sscanf(cfg->data + data_pos, "%x%n", &config_crc, &offset);
if (ret != 1) {
dev_err(dev, "Bad format: failed to parse Config CRC\n");
- ret = -EINVAL;
- goto release;
+ return -EINVAL;
}
data_pos += offset;
@@ -1166,8 +1286,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
} else if (config_crc == data->config_crc) {
dev_dbg(dev, "Config CRC 0x%06X: OK\n",
data->config_crc);
- ret = 0;
- goto release;
+ return 0;
} else {
dev_info(dev, "Config CRC 0x%06X: does not match file 0x%06X\n",
data->config_crc, config_crc);
@@ -1186,93 +1305,13 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
config_mem = kzalloc(config_mem_size, GFP_KERNEL);
if (!config_mem) {
dev_err(dev, "Failed to allocate memory\n");
- ret = -ENOMEM;
- goto release;
+ return -ENOMEM;
}
- while (data_pos < cfg->size) {
- /* Read type, instance, length */
- ret = sscanf(cfg->data + data_pos, "%x %x %x%n",
- &type, &instance, &size, &offset);
- if (ret == 0) {
- /* EOF */
- break;
- } else if (ret != 3) {
- dev_err(dev, "Bad format: failed to parse object\n");
- ret = -EINVAL;
- goto release_mem;
- }
- data_pos += offset;
-
- object = mxt_get_object(data, type);
- if (!object) {
- /* Skip object */
- for (i = 0; i < size; i++) {
- ret = sscanf(cfg->data + data_pos, "%hhx%n",
- &val,
- &offset);
- data_pos += offset;
- }
- continue;
- }
-
- if (size > mxt_obj_size(object)) {
- /*
- * Either we are in fallback mode due to wrong
- * config or config from a later fw version,
- * or the file is corrupt or hand-edited.
- */
- dev_warn(dev, "Discarding %zu byte(s) in T%u\n",
- size - mxt_obj_size(object), type);
- } else if (mxt_obj_size(object) > size) {
- /*
- * If firmware is upgraded, new bytes may be added to
- * end of objects. It is generally forward compatible
- * to zero these bytes - previous behaviour will be
- * retained. However this does invalidate the CRC and
- * will force fallback mode until the configuration is
- * updated. We warn here but do nothing else - the
- * malloc has zeroed the entire configuration.
- */
- dev_warn(dev, "Zeroing %zu byte(s) in T%d\n",
- mxt_obj_size(object) - size, type);
- }
-
- if (instance >= mxt_obj_instances(object)) {
- dev_err(dev, "Object instances exceeded!\n");
- ret = -EINVAL;
- goto release_mem;
- }
-
- reg = object->start_address + mxt_obj_size(object) * instance;
-
- for (i = 0; i < size; i++) {
- ret = sscanf(cfg->data + data_pos, "%hhx%n",
- &val,
- &offset);
- if (ret != 1) {
- dev_err(dev, "Bad format in T%d\n", type);
- ret = -EINVAL;
- goto release_mem;
- }
- data_pos += offset;
-
- if (i > mxt_obj_size(object))
- continue;
-
- byte_offset = reg + i - cfg_start_ofs;
-
- if ((byte_offset >= 0)
- && (byte_offset <= config_mem_size)) {
- *(config_mem + byte_offset) = val;
- } else {
- dev_err(dev, "Bad object: reg:%d, T%d, ofs=%d\n",
- reg, object->type, byte_offset);
- ret = -EINVAL;
- goto release_mem;
- }
- }
- }
+ ret = mxt_prepare_cfg_mem(data, cfg, data_pos, cfg_start_ofs,
+ config_mem, config_mem_size);
+ if (ret)
+ goto release_mem;
/* Calculate crc of the received configs (not the raw config file) */
if (data->T7_address < cfg_start_ofs) {
@@ -1286,28 +1325,14 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
data->T7_address - cfg_start_ofs,
config_mem_size);
- if (config_crc > 0 && (config_crc != calculated_crc))
+ if (config_crc > 0 && config_crc != calculated_crc)
dev_warn(dev, "Config CRC error, calculated=%06X, file=%06X\n",
calculated_crc, config_crc);
- /* Write configuration as blocks */
- byte_offset = 0;
- while (byte_offset < config_mem_size) {
- size = config_mem_size - byte_offset;
-
- if (size > MXT_MAX_BLOCK_WRITE)
- size = MXT_MAX_BLOCK_WRITE;
-
- ret = __mxt_write_reg(data->client,
- cfg_start_ofs + byte_offset,
- size, config_mem + byte_offset);
- if (ret != 0) {
- dev_err(dev, "Config write error, ret=%d\n", ret);
- goto release_mem;
- }
-
- byte_offset += size;
- }
+ ret = mxt_upload_cfg_mem(data, cfg_start_ofs,
+ config_mem, config_mem_size);
+ if (ret)
+ goto release_mem;
mxt_update_crc(data, MXT_COMMAND_BACKUPNV, MXT_BACKUP_VALUE);
@@ -1319,8 +1344,6 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
release_mem:
kfree(config_mem);
-release:
- release_firmware(cfg);
return ret;
}
@@ -1422,10 +1445,12 @@ static int mxt_get_object_table(struct mxt_data *data)
switch (object->type) {
case MXT_GEN_MESSAGE_T5:
- if (data->info.family_id == 0x80) {
+ if (data->info.family_id == 0x80 &&
+ data->info.version < 0x20) {
/*
- * On mXT224 read and discard unused CRC byte
- * otherwise DMA reads are misaligned
+			 * On mXT224 firmware versions prior to V2.0, read
+			 * and discard the unused CRC byte; otherwise DMA
+			 * reads are misaligned.
*/
data->T5_msg_size = mxt_obj_size(object);
} else {
@@ -1433,6 +1458,7 @@ static int mxt_get_object_table(struct mxt_data *data)
data->T5_msg_size = mxt_obj_size(object) - 1;
}
data->T5_address = object->start_address;
+ break;
case MXT_GEN_COMMAND_T6:
data->T6_reportid = min_id;
data->T6_address = object->start_address;
@@ -1638,46 +1664,45 @@ static int mxt_configure_objects(struct mxt_data *data,
static void mxt_config_cb(const struct firmware *cfg, void *ctx)
{
mxt_configure_objects(ctx, cfg);
+ release_firmware(cfg);
}
static int mxt_initialize(struct mxt_data *data)
{
struct i2c_client *client = data->client;
+ int recovery_attempts = 0;
int error;
- bool alt_bootloader_addr = false;
- bool retry = false;
-retry_info:
- error = mxt_get_info(data);
- if (error) {
-retry_bootloader:
- error = mxt_probe_bootloader(data, alt_bootloader_addr);
+ while (1) {
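+		/* try appmode first; fall back to probing the bootloader and recovering */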
+ error = mxt_get_info(data);
+ if (!error)
+ break;
+
+ /* Check bootloader state */
+ error = mxt_probe_bootloader(data, false);
if (error) {
- if (alt_bootloader_addr) {
+ dev_info(&client->dev, "Trying alternate bootloader address\n");
+ error = mxt_probe_bootloader(data, true);
+ if (error) {
/* Chip is not in appmode or bootloader mode */
return error;
}
+ }
- dev_info(&client->dev, "Trying alternate bootloader address\n");
- alt_bootloader_addr = true;
- goto retry_bootloader;
- } else {
- if (retry) {
- dev_err(&client->dev, "Could not recover from bootloader mode\n");
- /*
- * We can reflash from this state, so do not
- * abort init
- */
- data->in_bootloader = true;
- return 0;
- }
-
- /* Attempt to exit bootloader into app mode */
- mxt_send_bootloader_cmd(data, false);
- msleep(MXT_FW_RESET_TIME);
- retry = true;
- goto retry_info;
+		/* OK, we are in bootloader mode; see if we can recover */
+ if (++recovery_attempts > 1) {
+ dev_err(&client->dev, "Could not recover from bootloader mode\n");
+ /*
+ * We can reflash from this state, so do not
+ * abort initialization.
+ */
+ data->in_bootloader = true;
+ return 0;
}
+
+ /* Attempt to exit bootloader into app mode */
+ mxt_send_bootloader_cmd(data, false);
+ msleep(MXT_FW_RESET_TIME);
}
/* Get object table information */
@@ -1687,13 +1712,18 @@ retry_bootloader:
return error;
}
- mxt_acquire_irq(data);
+ error = mxt_acquire_irq(data);
if (error)
goto err_free_object_table;
- request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
- &data->client->dev, GFP_KERNEL, data,
- mxt_config_cb);
+ error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
+ &client->dev, GFP_KERNEL, data,
+ mxt_config_cb);
+ if (error) {
+ dev_err(&client->dev, "Failed to invoke firmware loader: %d\n",
+ error);
+ goto err_free_object_table;
+ }
return 0;
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 5a6d50c004d7..8857d5b9be71 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -262,7 +262,6 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata,
case M06:
wrbuf[0] = tsdata->factory_mode ? 0xf3 : 0xfc;
wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
- wrbuf[1] = tsdata->factory_mode ? addr & 0x7f : addr & 0x3f;
wrbuf[2] = value;
wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2];
return edt_ft5x06_ts_readwrite(tsdata->client, 4,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f9f3a1224dfa..ea025e4806b6 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2097,7 +2097,7 @@ static void iscsi_check_transport_timeouts(unsigned long data)
conn->ping_timeout, conn->recv_timeout,
last_recv, conn->last_ping, jiffies);
spin_unlock(&session->frwd_lock);
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
+ iscsi_conn_failure(conn, ISCSI_ERR_NOP_TIMEDOUT);
return;
}
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index d3a08aea0948..7abbf284da1a 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -526,18 +526,19 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
{
struct pm8001_ioctl_payload *payload;
DECLARE_COMPLETION_ONSTACK(completion);
- u8 *ioctlbuffer = NULL;
- u32 length = 0;
- u32 ret = 0;
+ u8 *ioctlbuffer;
+ u32 ret;
+ u32 length = 1024 * 5 + sizeof(*payload) - 1;
+
+ if (pm8001_ha->fw_image->size > 4096) {
+ pm8001_ha->fw_status = FAIL_FILE_SIZE;
+ return -EFAULT;
+ }
- length = 1024 * 5 + sizeof(*payload) - 1;
ioctlbuffer = kzalloc(length, GFP_KERNEL);
- if (!ioctlbuffer)
+ if (!ioctlbuffer) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
return -ENOMEM;
- if ((pm8001_ha->fw_image->size <= 0) ||
- (pm8001_ha->fw_image->size > 4096)) {
- ret = FAIL_FILE_SIZE;
- goto out;
}
payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data,
@@ -547,6 +548,10 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
payload->minor_function = 0x1;
pm8001_ha->nvmd_completion = &completion;
ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload);
+ if (ret) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
+ goto out;
+ }
wait_for_completion(&completion);
out:
kfree(ioctlbuffer);
@@ -557,35 +562,31 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
{
struct pm8001_ioctl_payload *payload;
DECLARE_COMPLETION_ONSTACK(completion);
- u8 *ioctlbuffer = NULL;
- u32 length = 0;
+ u8 *ioctlbuffer;
struct fw_control_info *fwControl;
- u32 loopNumber, loopcount = 0;
- u32 sizeRead = 0;
u32 partitionSize, partitionSizeTmp;
- u32 ret = 0;
- u32 partitionNumber = 0;
+ u32 loopNumber, loopcount;
struct pm8001_fw_image_header *image_hdr;
+ u32 sizeRead = 0;
+ u32 ret = 0;
+ u32 length = 1024 * 16 + sizeof(*payload) - 1;
- length = 1024 * 16 + sizeof(*payload) - 1;
+ if (pm8001_ha->fw_image->size < 28) {
+ pm8001_ha->fw_status = FAIL_FILE_SIZE;
+ return -EFAULT;
+ }
ioctlbuffer = kzalloc(length, GFP_KERNEL);
- image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data;
- if (!ioctlbuffer)
+ if (!ioctlbuffer) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
return -ENOMEM;
- if (pm8001_ha->fw_image->size < 28) {
- ret = FAIL_FILE_SIZE;
- goto out;
}
-
+ image_hdr = (struct pm8001_fw_image_header *)pm8001_ha->fw_image->data;
while (sizeRead < pm8001_ha->fw_image->size) {
partitionSizeTmp =
*(u32 *)((u8 *)&image_hdr->image_length + sizeRead);
partitionSize = be32_to_cpu(partitionSizeTmp);
- loopcount = (partitionSize + HEADER_LEN)/IOCTL_BUF_SIZE;
- if (loopcount % IOCTL_BUF_SIZE)
- loopcount++;
- if (loopcount == 0)
- loopcount++;
+ loopcount = DIV_ROUND_UP(partitionSize + HEADER_LEN,
+ IOCTL_BUF_SIZE);
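+		/* round up so a trailing partial chunk still gets its own request */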
for (loopNumber = 0; loopNumber < loopcount; loopNumber++) {
payload = (struct pm8001_ioctl_payload *)ioctlbuffer;
payload->length = 1024*16;
@@ -617,18 +618,18 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->nvmd_completion = &completion;
ret = PM8001_CHIP_DISP->fw_flash_update_req(pm8001_ha, payload);
- if (ret)
- break;
+ if (ret) {
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
+ goto out;
+ }
wait_for_completion(&completion);
if (fwControl->retcode > FLASH_UPDATE_IN_PROGRESS) {
- ret = fwControl->retcode;
- break;
+ pm8001_ha->fw_status = fwControl->retcode;
+ ret = -EFAULT;
+ goto out;
+ }
}
}
- if (ret)
- break;
- partitionNumber++;
-}
out:
kfree(ioctlbuffer);
return ret;
@@ -643,22 +644,29 @@ static ssize_t pm8001_store_update_fw(struct device *cdev,
char *cmd_ptr, *filename_ptr;
int res, i;
int flash_command = FLASH_CMD_NONE;
- int err = 0;
+ int ret;
+
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
- cmd_ptr = kzalloc(count*2, GFP_KERNEL);
+	/* this check protects us from running two flash operations at once,
+	 * so perform it before anything else */
+ if (pm8001_ha->fw_status == FLASH_IN_PROGRESS)
+ return -EINPROGRESS;
+ pm8001_ha->fw_status = FLASH_IN_PROGRESS;
+ cmd_ptr = kzalloc(count*2, GFP_KERNEL);
if (!cmd_ptr) {
- err = FAIL_OUT_MEMORY;
- goto out;
+ pm8001_ha->fw_status = FAIL_OUT_MEMORY;
+ return -ENOMEM;
}
filename_ptr = cmd_ptr + count;
res = sscanf(buf, "%s %s", cmd_ptr, filename_ptr);
if (res != 2) {
- err = FAIL_PARAMETERS;
- goto out1;
+ pm8001_ha->fw_status = FAIL_PARAMETERS;
+ ret = -EINVAL;
+ goto out;
}
for (i = 0; flash_command_table[i].code != FLASH_CMD_NONE; i++) {
@@ -669,50 +677,38 @@ static ssize_t pm8001_store_update_fw(struct device *cdev,
}
}
if (flash_command == FLASH_CMD_NONE) {
- err = FAIL_PARAMETERS;
- goto out1;
+ pm8001_ha->fw_status = FAIL_PARAMETERS;
+ ret = -EINVAL;
+ goto out;
}
- if (pm8001_ha->fw_status == FLASH_IN_PROGRESS) {
- err = FLASH_IN_PROGRESS;
- goto out1;
- }
- err = request_firmware(&pm8001_ha->fw_image,
+ ret = request_firmware(&pm8001_ha->fw_image,
filename_ptr,
pm8001_ha->dev);
- if (err) {
+ if (ret) {
PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("Failed to load firmware image file %s,"
- " error %d\n", filename_ptr, err));
- err = FAIL_OPEN_BIOS_FILE;
- goto out1;
+ pm8001_printk(
+ "Failed to load firmware image file %s, error %d\n",
+ filename_ptr, ret));
+ pm8001_ha->fw_status = FAIL_OPEN_BIOS_FILE;
+ goto out;
}
- switch (flash_command) {
- case FLASH_CMD_UPDATE:
- pm8001_ha->fw_status = FLASH_IN_PROGRESS;
- err = pm8001_update_flash(pm8001_ha);
- break;
- case FLASH_CMD_SET_NVMD:
- pm8001_ha->fw_status = FLASH_IN_PROGRESS;
- err = pm8001_set_nvmd(pm8001_ha);
- break;
- default:
- pm8001_ha->fw_status = FAIL_PARAMETERS;
- err = FAIL_PARAMETERS;
- break;
- }
+	if (flash_command == FLASH_CMD_UPDATE)
+ ret = pm8001_update_flash(pm8001_ha);
+ else
+ ret = pm8001_set_nvmd(pm8001_ha);
+
release_firmware(pm8001_ha->fw_image);
-out1:
- kfree(cmd_ptr);
out:
- pm8001_ha->fw_status = err;
+ kfree(cmd_ptr);
- if (!err)
- return count;
- else
- return -err;
+ if (ret)
+ return ret;
+
+ pm8001_ha->fw_status = FLASH_OK;
+ return count;
}
static ssize_t pm8001_show_update_fw(struct device *cdev,
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 173831016f5f..dd12c6fe57a6 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4824,7 +4824,7 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha,
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc) {
kfree(fw_control_context);
- return rc;
+ return -EBUSY;
}
ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
@@ -4946,7 +4946,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha,
rc = pm8001_tag_alloc(pm8001_ha, &tag);
if (rc) {
kfree(fw_control_context);
- return rc;
+ return -EBUSY;
}
ccb = &pm8001_ha->ccb_info[tag];
ccb->fw_control_context = fw_control_context;
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index e49623a897a7..666bf5af06e2 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -748,34 +748,35 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha)
sizeof(pm8001_ha->msix_entries[0]);
for (i = 0; i < max_entry ; i++)
pm8001_ha->msix_entries[i].entry = i;
- rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries,
+ rc = pci_enable_msix_exact(pm8001_ha->pdev, pm8001_ha->msix_entries,
number_of_intr);
pm8001_ha->number_of_intr = number_of_intr;
- if (!rc) {
- PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
- "pci_enable_msix request ret:%d no of intr %d\n",
- rc, pm8001_ha->number_of_intr));
+ if (rc)
+ return rc;
+ PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
+ "pci_enable_msix_exact request ret:%d no of intr %d\n",
+ rc, pm8001_ha->number_of_intr));
- for (i = 0; i < number_of_intr; i++) {
- snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
- DRV_NAME"%d", i);
- pm8001_ha->irq_vector[i].irq_id = i;
- pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
+ for (i = 0; i < number_of_intr; i++) {
+ snprintf(intr_drvname[i], sizeof(intr_drvname[0]),
+ DRV_NAME"%d", i);
+ pm8001_ha->irq_vector[i].irq_id = i;
+ pm8001_ha->irq_vector[i].drv_inst = pm8001_ha;
- rc = request_irq(pm8001_ha->msix_entries[i].vector,
- pm8001_interrupt_handler_msix, flag,
- intr_drvname[i], &(pm8001_ha->irq_vector[i]));
- if (rc) {
- for (j = 0; j < i; j++)
- free_irq(
- pm8001_ha->msix_entries[j].vector,
+ rc = request_irq(pm8001_ha->msix_entries[i].vector,
+ pm8001_interrupt_handler_msix, flag,
+ intr_drvname[i], &(pm8001_ha->irq_vector[i]));
+ if (rc) {
+ for (j = 0; j < i; j++) {
+ free_irq(pm8001_ha->msix_entries[j].vector,
&(pm8001_ha->irq_vector[i]));
- pci_disable_msix(pm8001_ha->pdev);
- break;
}
+ pci_disable_msix(pm8001_ha->pdev);
+ break;
}
}
+
return rc;
}
#endif
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 6f12f859b11d..4180d6d9fe78 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -334,6 +334,12 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
/* Allocate memory for saving the template */
md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
&md_tmp_dma, GFP_KERNEL);
+ if (!md_tmp) {
+ ql4_printk(KERN_INFO, ha,
+ "scsi%ld: Failed to allocate DMA memory\n",
+ ha->host_no);
+ return;
+ }
/* Request template */
status = qla4xxx_get_minidump_template(ha, md_tmp_dma);
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index fdfae79924ac..c291fdff1b33 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1620,8 +1620,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
goto exit_get_chap;
}
- strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
- strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+ strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+ strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
exit_get_chap:
@@ -1663,8 +1663,8 @@ int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, char *password,
else
chap_table->flags |= BIT_7; /* local */
chap_table->secret_len = strlen(password);
- strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN);
- strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN);
+ strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN - 1);
+ strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN - 1);
chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE);
if (is_qla40XX(ha)) {
@@ -1742,8 +1742,8 @@ int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
goto exit_unlock_uni_chap;
}
- strncpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
- strncpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
+ strlcpy(password, chap_table->secret, MAX_CHAP_SECRET_LEN);
+ strlcpy(username, chap_table->name, MAX_CHAP_NAME_LEN);
rval = QLA_SUCCESS;
@@ -2295,7 +2295,7 @@ int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param)
if (param == SET_DRVR_VERSION) {
mbox_cmd[1] = SET_DRVR_VERSION;
strncpy((char *)&mbox_cmd[2], QLA4XXX_DRIVER_VERSION,
- MAX_DRVR_VER_LEN);
+ MAX_DRVR_VER_LEN - 1);
} else {
ql4_printk(KERN_ERR, ha, "%s: invalid parameter 0x%x\n",
__func__, param);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 9dbdb4be2d8f..7c3365864242 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -4221,7 +4221,7 @@ qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
for (i = 0; i < QLA_MSIX_ENTRIES; i++)
entries[i].entry = qla4_8xxx_msix_entries[i].entry;
- ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
+ ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries));
if (ret) {
ql4_printk(KERN_WARNING, ha,
"MSI-X: Failed to enable support -- %d/%d\n",
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index c5d9564d455c..199fcf79a051 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -756,9 +756,9 @@ static int qla4xxx_get_chap_list(struct Scsi_Host *shost, uint16_t chap_tbl_idx,
continue;
chap_rec->chap_tbl_idx = i;
- strncpy(chap_rec->username, chap_table->name,
+ strlcpy(chap_rec->username, chap_table->name,
ISCSI_CHAP_AUTH_NAME_MAX_LEN);
- strncpy(chap_rec->password, chap_table->secret,
+ strlcpy(chap_rec->password, chap_table->secret,
QL4_CHAP_MAX_SECRET_LEN);
chap_rec->password_length = chap_table->secret_len;
@@ -1050,6 +1050,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
if (!ql_iscsi_stats) {
ql4_printk(KERN_ERR, ha,
"Unable to allocate memory for iscsi stats\n");
+ ret = -ENOMEM;
goto exit_host_stats;
}
@@ -1058,6 +1059,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
if (ret != QLA_SUCCESS) {
ql4_printk(KERN_ERR, ha,
"Unable to retrieve iscsi stats\n");
+ ret = -EIO;
goto exit_host_stats;
}
host_stats->mactx_frames = le64_to_cpu(ql_iscsi_stats->mac_tx_frames);
@@ -6027,8 +6029,8 @@ static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username,
if (!(chap_table->flags & BIT_6)) /* Not BIDI */
continue;
- strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
- strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
+ strlcpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN);
+ strlcpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN);
ret = 0;
break;
}
@@ -6258,8 +6260,8 @@ static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
tddb->tpgt = sess->tpgt;
tddb->port = conn->persistent_port;
- strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
- strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+ strlcpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+ strlcpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
}
static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
@@ -7764,7 +7766,7 @@ static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess,
goto exit_ddb_logout;
}
- strncpy(flash_tddb->iscsi_name, fnode_sess->targetname,
+ strlcpy(flash_tddb->iscsi_name, fnode_sess->targetname,
ISCSI_NAME_SIZE);
if (!strncmp(fnode_sess->portal_type, PORTAL_TYPE_IPV6, 4))
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index b481e62a12cc..67d43e35693d 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3429,7 +3429,7 @@ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
char *buf;
if (!transport->get_host_stats)
- return -EINVAL;
+ return -ENOSYS;
priv = iscsi_if_transport_lookup(transport);
if (!priv)
@@ -3467,6 +3467,10 @@ iscsi_get_host_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
memset(buf, 0, host_stats_size);
err = transport->get_host_stats(shost, buf, host_stats_size);
+ if (err) {
+ kfree_skb(skbhost_stats);
+ goto exit_host_stats;
+ }
actual_size = nlmsg_total_size(sizeof(*ev) + host_stats_size);
skb_trim(skbhost_stats, NLMSG_ALIGN(actual_size));
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 4e76fe863fc4..d8dcf36aed11 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1006,7 +1006,7 @@ static int port_detect \
sh[j]->irq, dma_name, sh[j]->sg_tablesize, sh[j]->can_queue);
if (sh[j]->max_id > 8 || sh[j]->max_lun > 8)
- printk("%s: wide SCSI support enabled, max_id %u, max_lun %u.\n",
+ printk("%s: wide SCSI support enabled, max_id %u, max_lun %llu.\n",
BN(j), sh[j]->max_id, sh[j]->max_lun);
for (i = 0; i <= sh[j]->max_channel; i++)
@@ -1285,7 +1285,7 @@ static int u14_34f_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct
cpp->cpp_index = i;
SCpnt->host_scribble = (unsigned char *) &cpp->cpp_index;
- if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%llu.\n",
+ if (do_trace) printk("%s: qcomm, mbox %d, target %d.%d:%u.\n",
BN(j), i, SCpnt->device->channel, SCpnt->device->id,
(u8)SCpnt->device->lun);
diff --git a/fs/aio.c b/fs/aio.c
index bd7ec2cc2674..ae635872affb 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -192,7 +192,6 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
}
file->f_flags = O_RDWR;
- file->private_data = ctx;
return file;
}
@@ -202,7 +201,7 @@ static struct dentry *aio_mount(struct file_system_type *fs_type,
static const struct dentry_operations ops = {
.d_dname = simple_dname,
};
- return mount_pseudo(fs_type, "aio:", NULL, &ops, 0xa10a10a1);
+ return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC);
}
/* aio_setup
@@ -556,8 +555,7 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
struct aio_ring *ring;
spin_lock(&mm->ioctx_lock);
- rcu_read_lock();
- table = rcu_dereference(mm->ioctx_table);
+ table = rcu_dereference_raw(mm->ioctx_table);
while (1) {
if (table)
@@ -565,7 +563,6 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
if (!table->table[i]) {
ctx->id = i;
table->table[i] = ctx;
- rcu_read_unlock();
spin_unlock(&mm->ioctx_lock);
/* While kioctx setup is in progress,
@@ -579,8 +576,6 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
}
new_nr = (table ? table->nr : 1) * 4;
-
- rcu_read_unlock();
spin_unlock(&mm->ioctx_lock);
table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
@@ -591,8 +586,7 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
table->nr = new_nr;
spin_lock(&mm->ioctx_lock);
- rcu_read_lock();
- old = rcu_dereference(mm->ioctx_table);
+ old = rcu_dereference_raw(mm->ioctx_table);
if (!old) {
rcu_assign_pointer(mm->ioctx_table, table);
@@ -739,12 +733,9 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
spin_lock(&mm->ioctx_lock);
- rcu_read_lock();
- table = rcu_dereference(mm->ioctx_table);
-
+ table = rcu_dereference_raw(mm->ioctx_table);
WARN_ON(ctx != table->table[ctx->id]);
table->table[ctx->id] = NULL;
- rcu_read_unlock();
spin_unlock(&mm->ioctx_lock);
/* percpu_ref_kill() will do the necessary call_rcu() */
@@ -793,40 +784,30 @@ EXPORT_SYMBOL(wait_on_sync_kiocb);
*/
void exit_aio(struct mm_struct *mm)
{
- struct kioctx_table *table;
- struct kioctx *ctx;
- unsigned i = 0;
-
- while (1) {
- rcu_read_lock();
- table = rcu_dereference(mm->ioctx_table);
-
- do {
- if (!table || i >= table->nr) {
- rcu_read_unlock();
- rcu_assign_pointer(mm->ioctx_table, NULL);
- if (table)
- kfree(table);
- return;
- }
+ struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
+ int i;
- ctx = table->table[i++];
- } while (!ctx);
+ if (!table)
+ return;
- rcu_read_unlock();
+ for (i = 0; i < table->nr; ++i) {
+ struct kioctx *ctx = table->table[i];
+ if (!ctx)
+ continue;
/*
- * We don't need to bother with munmap() here -
- * exit_mmap(mm) is coming and it'll unmap everything.
- * Since aio_free_ring() uses non-zero ->mmap_size
- * as indicator that it needs to unmap the area,
- * just set it to 0; aio_free_ring() is the only
- * place that uses ->mmap_size, so it's safe.
+ * We don't need to bother with munmap() here - exit_mmap(mm)
+	 * is coming and it'll unmap everything. And we simply can't;
+	 * this is not necessarily our ->mm.
+ * Since kill_ioctx() uses non-zero ->mmap_size as indicator
+ * that it needs to unmap the area, just set it to 0.
*/
ctx->mmap_size = 0;
-
kill_ioctx(mm, ctx, NULL);
}
+
+ RCU_INIT_POINTER(mm->ioctx_table, NULL);
+ kfree(table);
}
static void put_reqs_available(struct kioctx *ctx, unsigned nr)
@@ -834,10 +815,8 @@ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
struct kioctx_cpu *kcpu;
unsigned long flags;
- preempt_disable();
- kcpu = this_cpu_ptr(ctx->cpu);
-
local_irq_save(flags);
+ kcpu = this_cpu_ptr(ctx->cpu);
kcpu->reqs_available += nr;
while (kcpu->reqs_available >= ctx->req_batch * 2) {
@@ -846,7 +825,6 @@ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
}
local_irq_restore(flags);
- preempt_enable();
}
static bool get_reqs_available(struct kioctx *ctx)
@@ -855,10 +833,8 @@ static bool get_reqs_available(struct kioctx *ctx)
bool ret = false;
unsigned long flags;
- preempt_disable();
- kcpu = this_cpu_ptr(ctx->cpu);
-
local_irq_save(flags);
+ kcpu = this_cpu_ptr(ctx->cpu);
if (!kcpu->reqs_available) {
int old, avail = atomic_read(&ctx->reqs_available);
@@ -878,7 +854,6 @@ static bool get_reqs_available(struct kioctx *ctx)
kcpu->reqs_available--;
out:
local_irq_restore(flags);
- preempt_enable();
return ret;
}
@@ -1047,7 +1022,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
}
EXPORT_SYMBOL(aio_complete);
-/* aio_read_events
+/* aio_read_events_ring
* Pull an event off of the ioctx's event ring. Returns the number of
* events fetched
*/
@@ -1270,12 +1245,12 @@ static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb,
if (compat)
ret = compat_rw_copy_check_uvector(rw,
(struct compat_iovec __user *)buf,
- *nr_segs, 1, *iovec, iovec);
+ *nr_segs, UIO_FASTIOV, *iovec, iovec);
else
#endif
ret = rw_copy_check_uvector(rw,
(struct iovec __user *)buf,
- *nr_segs, 1, *iovec, iovec);
+ *nr_segs, UIO_FASTIOV, *iovec, iovec);
if (ret < 0)
return ret;
@@ -1299,9 +1274,8 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb,
}
/*
- * aio_setup_iocb:
- * Performs the initial checks and aio retry method
- * setup for the kiocb at the time of io submission.
+ * aio_run_iocb:
+ * Performs the initial checks and io submission.
*/
static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
char __user *buf, bool compat)
@@ -1313,7 +1287,7 @@ static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode,
fmode_t mode;
aio_rw_op *rw_op;
rw_iter_op *iter_op;
- struct iovec inline_vec, *iovec = &inline_vec;
+ struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
struct iov_iter iter;
switch (opcode) {
@@ -1348,7 +1322,7 @@ rw_common:
if (!ret)
ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
if (ret < 0) {
- if (iovec != &inline_vec)
+ if (iovec != inline_vecs)
kfree(iovec);
return ret;
}
@@ -1395,7 +1369,7 @@ rw_common:
return -EINVAL;
}
- if (iovec != &inline_vec)
+ if (iovec != inline_vecs)
kfree(iovec);
if (ret != -EIOCBQUEUED) {
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index e25564bfcb46..54a201dac7f9 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -276,9 +276,8 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
}
if (ret > 0)
goto next;
- ret = ulist_add_merge(parents, eb->start,
- (uintptr_t)eie,
- (u64 *)&old, GFP_NOFS);
+ ret = ulist_add_merge_ptr(parents, eb->start,
+ eie, (void **)&old, GFP_NOFS);
if (ret < 0)
break;
if (!ret && extent_item_pos) {
@@ -1001,16 +1000,19 @@ again:
ret = -EIO;
goto out;
}
+ btrfs_tree_read_lock(eb);
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
ret = find_extent_in_eb(eb, bytenr,
*extent_item_pos, &eie);
+ btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
if (ret < 0)
goto out;
ref->inode_list = eie;
}
- ret = ulist_add_merge(refs, ref->parent,
- (uintptr_t)ref->inode_list,
- (u64 *)&eie, GFP_NOFS);
+ ret = ulist_add_merge_ptr(refs, ref->parent,
+ ref->inode_list,
+ (void **)&eie, GFP_NOFS);
if (ret < 0)
goto out;
if (!ret && extent_item_pos) {
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 4794923c410c..43527fd78825 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -84,12 +84,6 @@ struct btrfs_inode {
*/
struct list_head delalloc_inodes;
- /*
- * list for tracking inodes that must be sent to disk before a
- * rename or truncate commit
- */
- struct list_head ordered_operations;
-
/* node for the red-black tree that links inodes in subvolume root */
struct rb_node rb_node;
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index aeab453b8e24..44ee5d2e52a4 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -280,9 +280,9 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
WARN_ON(btrfs_header_generation(buf) > trans->transid);
if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
- ret = btrfs_inc_ref(trans, root, cow, 1, 1);
+ ret = btrfs_inc_ref(trans, root, cow, 1);
else
- ret = btrfs_inc_ref(trans, root, cow, 0, 1);
+ ret = btrfs_inc_ref(trans, root, cow, 0);
if (ret)
return ret;
@@ -1035,14 +1035,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
if ((owner == root->root_key.objectid ||
root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
- ret = btrfs_inc_ref(trans, root, buf, 1, 1);
+ ret = btrfs_inc_ref(trans, root, buf, 1);
BUG_ON(ret); /* -ENOMEM */
if (root->root_key.objectid ==
BTRFS_TREE_RELOC_OBJECTID) {
- ret = btrfs_dec_ref(trans, root, buf, 0, 1);
+ ret = btrfs_dec_ref(trans, root, buf, 0);
BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_inc_ref(trans, root, cow, 1, 1);
+ ret = btrfs_inc_ref(trans, root, cow, 1);
BUG_ON(ret); /* -ENOMEM */
}
new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
@@ -1050,9 +1050,9 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
if (root->root_key.objectid ==
BTRFS_TREE_RELOC_OBJECTID)
- ret = btrfs_inc_ref(trans, root, cow, 1, 1);
+ ret = btrfs_inc_ref(trans, root, cow, 1);
else
- ret = btrfs_inc_ref(trans, root, cow, 0, 1);
+ ret = btrfs_inc_ref(trans, root, cow, 0);
BUG_ON(ret); /* -ENOMEM */
}
if (new_flags != 0) {
@@ -1069,11 +1069,11 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
if (root->root_key.objectid ==
BTRFS_TREE_RELOC_OBJECTID)
- ret = btrfs_inc_ref(trans, root, cow, 1, 1);
+ ret = btrfs_inc_ref(trans, root, cow, 1);
else
- ret = btrfs_inc_ref(trans, root, cow, 0, 1);
+ ret = btrfs_inc_ref(trans, root, cow, 0);
BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_dec_ref(trans, root, buf, 1, 1);
+ ret = btrfs_dec_ref(trans, root, buf, 1);
BUG_ON(ret); /* -ENOMEM */
}
clean_tree_block(trans, root, buf);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index be91397f4e92..8e29b614fe93 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3326,9 +3326,9 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
u64 min_alloc_size, u64 empty_size, u64 hint_byte,
struct btrfs_key *ins, int is_data, int delalloc);
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref, int no_quota);
+ struct extent_buffer *buf, int full_backref);
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref, int no_quota);
+ struct extent_buffer *buf, int full_backref);
int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, u64 flags,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 08e65e9cf2aa..d0ed9e664f7d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -60,8 +60,6 @@ static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only);
-static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
- struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_root *root);
@@ -3829,34 +3827,6 @@ static void btrfs_error_commit_super(struct btrfs_root *root)
btrfs_cleanup_transaction(root);
}
-static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
- struct btrfs_root *root)
-{
- struct btrfs_inode *btrfs_inode;
- struct list_head splice;
-
- INIT_LIST_HEAD(&splice);
-
- mutex_lock(&root->fs_info->ordered_operations_mutex);
- spin_lock(&root->fs_info->ordered_root_lock);
-
- list_splice_init(&t->ordered_operations, &splice);
- while (!list_empty(&splice)) {
- btrfs_inode = list_entry(splice.next, struct btrfs_inode,
- ordered_operations);
-
- list_del_init(&btrfs_inode->ordered_operations);
- spin_unlock(&root->fs_info->ordered_root_lock);
-
- btrfs_invalidate_inodes(btrfs_inode->root);
-
- spin_lock(&root->fs_info->ordered_root_lock);
- }
-
- spin_unlock(&root->fs_info->ordered_root_lock);
- mutex_unlock(&root->fs_info->ordered_operations_mutex);
-}
-
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
struct btrfs_ordered_extent *ordered;
@@ -4093,8 +4063,6 @@ again:
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
struct btrfs_root *root)
{
- btrfs_destroy_ordered_operations(cur_trans, root);
-
btrfs_destroy_delayed_refs(cur_trans, root);
cur_trans->state = TRANS_STATE_COMMIT_START;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 813537f362f9..102ed3143976 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3057,7 +3057,7 @@ out:
static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf,
- int full_backref, int inc, int no_quota)
+ int full_backref, int inc)
{
u64 bytenr;
u64 num_bytes;
@@ -3111,7 +3111,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
key.offset -= btrfs_file_extent_offset(buf, fi);
ret = process_func(trans, root, bytenr, num_bytes,
parent, ref_root, key.objectid,
- key.offset, no_quota);
+ key.offset, 1);
if (ret)
goto fail;
} else {
@@ -3119,7 +3119,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
num_bytes = btrfs_level_size(root, level - 1);
ret = process_func(trans, root, bytenr, num_bytes,
parent, ref_root, level - 1, 0,
- no_quota);
+ 1);
if (ret)
goto fail;
}
@@ -3130,15 +3130,15 @@ fail:
}
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref, int no_quota)
+ struct extent_buffer *buf, int full_backref)
{
- return __btrfs_mod_ref(trans, root, buf, full_backref, 1, no_quota);
+ return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
}
int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
- struct extent_buffer *buf, int full_backref, int no_quota)
+ struct extent_buffer *buf, int full_backref)
{
- return __btrfs_mod_ref(trans, root, buf, full_backref, 0, no_quota);
+ return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
}
static int write_one_cache_group(struct btrfs_trans_handle *trans,
@@ -7478,6 +7478,220 @@ reada:
wc->reada_slot = slot;
}
+static int account_leaf_items(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *eb)
+{
+ int nr = btrfs_header_nritems(eb);
+ int i, extent_type, ret;
+ struct btrfs_key key;
+ struct btrfs_file_extent_item *fi;
+ u64 bytenr, num_bytes;
+
+ for (i = 0; i < nr; i++) {
+ btrfs_item_key_to_cpu(eb, &key, i);
+
+ if (key.type != BTRFS_EXTENT_DATA_KEY)
+ continue;
+
+ fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
+ /* filter out non qgroup-accountable extents */
+ extent_type = btrfs_file_extent_type(eb, fi);
+
+ if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+ continue;
+
+ bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
+ if (!bytenr)
+ continue;
+
+ num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
+
+ ret = btrfs_qgroup_record_ref(trans, root->fs_info,
+ root->objectid,
+ bytenr, num_bytes,
+ BTRFS_QGROUP_OPER_SUB_SUBTREE, 0);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
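
account_leaf_items() above scans a leaf and records qgroup operations only for real data extents: items that are not EXTENT_DATA, inline extents (which have no disk bytenr) and holes (disk_bytenr == 0) are all skipped. Here is a minimal, hedged userspace model of that filtering; the constants, struct leaf_item and the record() callback are invented stand-ins for illustration, not btrfs definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the on-disk item types (illustration only). */
enum { EXTENT_DATA_KEY = 108, OTHER_KEY = 0 };
enum { FILE_EXTENT_INLINE = 0, FILE_EXTENT_REG = 1 };

struct leaf_item {
	int key_type;
	int extent_type;
	uint64_t disk_bytenr;		/* 0 means a hole */
	uint64_t disk_num_bytes;
};

/* Visit each leaf item and record only qgroup-accountable data extents. */
static int account_leaf_items_model(const struct leaf_item *items, int nr,
				    int (*record)(uint64_t bytenr, uint64_t bytes))
{
	int i, ret;

	for (i = 0; i < nr; i++) {
		if (items[i].key_type != EXTENT_DATA_KEY)
			continue;
		if (items[i].extent_type == FILE_EXTENT_INLINE)
			continue;
		if (!items[i].disk_bytenr)	/* hole */
			continue;
		ret = record(items[i].disk_bytenr, items[i].disk_num_bytes);
		if (ret)
			return ret;
	}
	return 0;
}

static int print_record(uint64_t bytenr, uint64_t bytes)
{
	printf("extent %llu, %llu bytes\n",
	       (unsigned long long)bytenr, (unsigned long long)bytes);
	return 0;
}

int main(void)
{
	struct leaf_item leaf[] = {
		{ EXTENT_DATA_KEY, FILE_EXTENT_REG, 4096, 8192 },
		{ EXTENT_DATA_KEY, FILE_EXTENT_INLINE, 0, 0 },	/* skipped */
		{ EXTENT_DATA_KEY, FILE_EXTENT_REG, 0, 4096 },	/* hole, skipped */
		{ OTHER_KEY, FILE_EXTENT_REG, 12288, 4096 },	/* skipped */
	};

	return account_leaf_items_model(leaf, 4, print_record);
}
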
+
+/*
+ * Walk up the tree from the bottom, freeing leaves and any interior
+ * nodes which have had all slots visited. If a node (leaf or
+ * interior) is freed, the node above it will have its slot
+ * incremented. The root node will never be freed.
+ *
+ * At the end of this function, we should have a path which has all
+ * slots incremented to the next position for a search. If we need to
+ * read a new node it will be NULL and the node above it will have the
+ * correct slot selected for a later read.
+ *
+ * If we increment the root node's slot counter past the number of
+ * elements, 1 is returned to signal completion of the search.
+ */
+static int adjust_slots_upwards(struct btrfs_root *root,
+ struct btrfs_path *path, int root_level)
+{
+ int level = 0;
+ int nr, slot;
+ struct extent_buffer *eb;
+
+ if (root_level == 0)
+ return 1;
+
+ while (level <= root_level) {
+ eb = path->nodes[level];
+ nr = btrfs_header_nritems(eb);
+ path->slots[level]++;
+ slot = path->slots[level];
+ if (slot >= nr || level == 0) {
+ /*
+ * Don't free the root - we will detect this
+ * condition after our loop and return a
+ * positive value for caller to stop walking the tree.
+ */
+ if (level != root_level) {
+ btrfs_tree_unlock_rw(eb, path->locks[level]);
+ path->locks[level] = 0;
+
+ free_extent_buffer(eb);
+ path->nodes[level] = NULL;
+ path->slots[level] = 0;
+ }
+ } else {
+ /*
+ * We have a valid slot to walk back down
+ * from. Stop here so caller can process these
+ * new nodes.
+ */
+ break;
+ }
+
+ level++;
+ }
+
+ eb = path->nodes[root_level];
+ if (path->slots[root_level] >= btrfs_header_nritems(eb))
+ return 1;
+
+ return 0;
+}
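
The comment above documents the walk-up contract: advance the slot in the lowest node that still has unvisited children, drop (free) nodes that are exhausted, and return nonzero once the root itself runs out of slots. Below is a minimal, hedged userspace sketch of that same iteration pattern over a toy pointer-based tree; struct node, walk() and every name here are invented for illustration and are not btrfs structures.

#include <stdio.h>

#define MAX_LEVEL 8

struct node {
	int nr;			/* number of children (or of items, at level 0) */
	struct node *child[4];
};

/*
 * Bump slots from the bottom up.  A leaf (level 0) is always dropped;
 * interior nodes are dropped once their slot runs past nr.  Returns 1
 * when the root itself is exhausted, i.e. the walk is complete.
 */
static int adjust_slots_upwards(struct node **nodes, int *slots, int root_level)
{
	int level;

	if (root_level == 0)
		return 1;

	for (level = 0; level <= root_level; level++) {
		slots[level]++;
		if (slots[level] >= nodes[level]->nr || level == 0) {
			if (level != root_level) {
				nodes[level] = NULL;	/* "free" the node */
				slots[level] = 0;
			}
		} else {
			break;	/* valid slot to walk back down from */
		}
	}
	return slots[root_level] >= nodes[root_level]->nr;
}

/* Visit every leaf, re-descending from the deepest surviving node. */
static int walk(struct node *root, int root_level)
{
	struct node *nodes[MAX_LEVEL] = { NULL };
	int slots[MAX_LEVEL] = { 0 };
	int level, leaves = 0;

	nodes[root_level] = root;
	while (1) {
		for (level = root_level; level > 0; level--)
			if (!nodes[level - 1])
				nodes[level - 1] =
					nodes[level]->child[slots[level]];
		leaves++;		/* "account" the leaf at nodes[0] */
		if (adjust_slots_upwards(nodes, slots, root_level))
			return leaves;
	}
}

int main(void)
{
	struct node leaf[4] = { { 3 }, { 2 }, { 5 }, { 1 } };
	struct node mid[2] = {
		{ 2, { &leaf[0], &leaf[1] } },
		{ 2, { &leaf[2], &leaf[3] } },
	};
	struct node root = { 2, { &mid[0], &mid[1] } };

	printf("visited %d leaves\n", walk(&root, 2));	/* prints 4 */
	return 0;
}
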
+
+/*
+ * root_eb is the subtree root and is locked before this function is called.
+ */
+static int account_shared_subtree(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *root_eb,
+ u64 root_gen,
+ int root_level)
+{
+ int ret = 0;
+ int level;
+ struct extent_buffer *eb = root_eb;
+ struct btrfs_path *path = NULL;
+
+ BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
+ BUG_ON(root_eb == NULL);
+
+ if (!root->fs_info->quota_enabled)
+ return 0;
+
+ if (!extent_buffer_uptodate(root_eb)) {
+ ret = btrfs_read_buffer(root_eb, root_gen);
+ if (ret)
+ goto out;
+ }
+
+ if (root_level == 0) {
+ ret = account_leaf_items(trans, root, root_eb);
+ goto out;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ /*
+ * Walk down the tree. Missing extent blocks are filled in as
+ * we go. Metadata is accounted every time we read a new
+ * extent block.
+ *
+ * When we reach a leaf, we account for file extent items in it,
+ * walk back up the tree (adjusting slot pointers as we go)
+ * and restart the search process.
+ */
+ extent_buffer_get(root_eb); /* For path */
+ path->nodes[root_level] = root_eb;
+ path->slots[root_level] = 0;
+ path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
+walk_down:
+ level = root_level;
+ while (level >= 0) {
+ if (path->nodes[level] == NULL) {
+ int child_bsize = root->nodesize;
+ int parent_slot;
+ u64 child_gen;
+ u64 child_bytenr;
+
+ /* We need to get child blockptr/gen from
+ * parent before we can read it. */
+ eb = path->nodes[level + 1];
+ parent_slot = path->slots[level + 1];
+ child_bytenr = btrfs_node_blockptr(eb, parent_slot);
+ child_gen = btrfs_node_ptr_generation(eb, parent_slot);
+
+ eb = read_tree_block(root, child_bytenr, child_bsize,
+ child_gen);
+ if (!eb || !extent_buffer_uptodate(eb)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ path->nodes[level] = eb;
+ path->slots[level] = 0;
+
+ btrfs_tree_read_lock(eb);
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
+
+ ret = btrfs_qgroup_record_ref(trans, root->fs_info,
+ root->objectid,
+ child_bytenr,
+ child_bsize,
+ BTRFS_QGROUP_OPER_SUB_SUBTREE,
+ 0);
+ if (ret)
+ goto out;
+
+ }
+
+ if (level == 0) {
+ ret = account_leaf_items(trans, root, path->nodes[level]);
+ if (ret)
+ goto out;
+
+ /* Nonzero return here means we completed our search */
+ ret = adjust_slots_upwards(root, path, root_level);
+ if (ret)
+ break;
+
+ /* Restart search with new slots */
+ goto walk_down;
+ }
+
+ level--;
+ }
+
+ ret = 0;
+out:
+ btrfs_free_path(path);
+
+ return ret;
+}
+
/*
* helper to process tree block while walking down the tree.
*
@@ -7532,9 +7746,9 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
/* wc->stage == UPDATE_BACKREF */
if (!(wc->flags[level] & flag)) {
BUG_ON(!path->locks[level]);
- ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
+ ret = btrfs_inc_ref(trans, root, eb, 1);
BUG_ON(ret); /* -ENOMEM */
- ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
+ ret = btrfs_dec_ref(trans, root, eb, 0);
BUG_ON(ret); /* -ENOMEM */
ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
eb->len, flag,
@@ -7581,6 +7795,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
int level = wc->level;
int reada = 0;
int ret = 0;
+ bool need_account = false;
generation = btrfs_node_ptr_generation(path->nodes[level],
path->slots[level]);
@@ -7626,6 +7841,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
if (wc->stage == DROP_REFERENCE) {
if (wc->refs[level - 1] > 1) {
+ need_account = true;
if (level == 1 &&
(wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
goto skip;
@@ -7689,6 +7905,16 @@ skip:
parent = 0;
}
+ if (need_account) {
+ ret = account_shared_subtree(trans, root, next,
+ generation, level - 1);
+ if (ret) {
+ printk_ratelimited(KERN_ERR "BTRFS: %s Error "
+ "%d accounting shared subtree. Quota "
+ "is out of sync, rescan required.\n",
+ root->fs_info->sb->s_id, ret);
+ }
+ }
ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
root->root_key.objectid, level - 1, 0, 0);
BUG_ON(ret); /* -ENOMEM */
@@ -7769,12 +7995,17 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (wc->refs[level] == 1) {
if (level == 0) {
if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
- ret = btrfs_dec_ref(trans, root, eb, 1,
- wc->for_reloc);
+ ret = btrfs_dec_ref(trans, root, eb, 1);
else
- ret = btrfs_dec_ref(trans, root, eb, 0,
- wc->for_reloc);
+ ret = btrfs_dec_ref(trans, root, eb, 0);
BUG_ON(ret); /* -ENOMEM */
+ ret = account_leaf_items(trans, root, eb);
+ if (ret) {
+ printk_ratelimited(KERN_ERR "BTRFS: %s Error "
+ "%d accounting leaf items. Quota "
+ "is out of sync, rescan required.\n",
+ root->fs_info->sb->s_id, ret);
+ }
}
/* make block locked assertion in clean_tree_block happy */
if (!path->locks[level] &&
@@ -7900,6 +8131,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
int level;
bool root_dropped = false;
+ btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
+
path = btrfs_alloc_path();
if (!path) {
err = -ENOMEM;
@@ -8025,6 +8258,24 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
goto out_end_trans;
}
+ /*
+ * Qgroup update accounting is run from
+ * delayed ref handling. This usually works
+ * out because delayed refs are normally the
+ * only way qgroup updates are added. However,
+ * we may have added updates during our tree
+ * walk so run qgroups here to make sure we
+ * don't lose any updates.
+ */
+ ret = btrfs_delayed_qgroup_accounting(trans,
+ root->fs_info);
+ if (ret)
+ printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
+ "running qgroup updates "
+ "during snapshot delete. "
+ "Quota is out of sync, "
+ "rescan required.\n", ret);
+
btrfs_end_transaction_throttle(trans, tree_root);
if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
pr_debug("BTRFS: drop snapshot early exit\n");
@@ -8078,6 +8329,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
}
root_dropped = true;
out_end_trans:
+ ret = btrfs_delayed_qgroup_accounting(trans, tree_root->fs_info);
+ if (ret)
+ printk_ratelimited(KERN_ERR "BTRFS: Failure %d "
+ "running qgroup updates "
+ "during snapshot delete. "
+ "Quota is out of sync, "
+ "rescan required.\n", ret);
+
btrfs_end_transaction_throttle(trans, tree_root);
out_free:
kfree(wc);
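
The new comments in btrfs_drop_snapshot() explain the design: qgroup operations are normally queued by delayed-ref processing and settled later, so anything queued directly by the tree walk has to be flushed explicitly (via btrfs_delayed_qgroup_accounting) before the transaction ends, or the counters drift and a rescan is needed. A very small, hedged model of that record-now/flush-at-commit shape follows; record_op(), flush_ops() and the queue are invented for illustration and are not btrfs API.

#include <stdio.h>

#define MAX_OPS 64

struct qop { unsigned long long bytenr, bytes; };

static struct qop pending[MAX_OPS];
static int nr_pending;

/* Record an operation now; the real accounting happens at flush time. */
static int record_op(unsigned long long bytenr, unsigned long long bytes)
{
	if (nr_pending >= MAX_OPS)
		return -1;
	pending[nr_pending].bytenr = bytenr;
	pending[nr_pending].bytes = bytes;
	nr_pending++;
	return 0;
}

/* Apply everything that was queued; run this before the "transaction" ends. */
static void flush_ops(void)
{
	int i;

	for (i = 0; i < nr_pending; i++)
		printf("account %llu bytes at %llu\n",
		       pending[i].bytes, pending[i].bytenr);
	nr_pending = 0;
}

int main(void)
{
	record_op(4096, 16384);	/* queued during the tree walk */
	record_op(65536, 4096);
	flush_ops();		/* explicit flush, like the drop-snapshot path */
	return 0;
}
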
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index f46cfe45d686..54c84daec9b5 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -756,7 +756,7 @@ again:
found_next = 1;
if (ret != 0)
goto insert;
- slot = 0;
+ slot = path->slots[0];
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 1f2b99cb55ea..d3afac292d67 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1838,33 +1838,9 @@ out:
int btrfs_release_file(struct inode *inode, struct file *filp)
{
- /*
- * ordered_data_close is set by settattr when we are about to truncate
- * a file from a non-zero size to a zero size. This tries to
- * flush down new bytes that may have been written if the
- * application were using truncate to replace a file in place.
- */
- if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
- &BTRFS_I(inode)->runtime_flags)) {
- struct btrfs_trans_handle *trans;
- struct btrfs_root *root = BTRFS_I(inode)->root;
-
- /*
- * We need to block on a committing transaction to keep us from
- * throwing a ordered operation on to the list and causing
- * something like sync to deadlock trying to flush out this
- * inode.
- */
- trans = btrfs_start_transaction(root, 0);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
- btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
- btrfs_end_transaction(trans, root);
- if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
- filemap_flush(inode->i_mapping);
- }
if (filp->private_data)
btrfs_ioctl_trans_end(filp);
+ filemap_flush(inode->i_mapping);
return 0;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3183742d6f0d..03708ef3deef 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -709,6 +709,18 @@ retry:
unlock_extent(io_tree, async_extent->start,
async_extent->start +
async_extent->ram_size - 1);
+
+ /*
+ * we need to redirty the pages if we decide to
+ * fallback to uncompressed IO, otherwise we
+ * will not submit these pages down to lower
+ * layers.
+ */
+ extent_range_redirty_for_io(inode,
+ async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1);
+
goto retry;
}
goto out_free;
@@ -7939,27 +7951,6 @@ static int btrfs_truncate(struct inode *inode)
BUG_ON(ret);
/*
- * setattr is responsible for setting the ordered_data_close flag,
- * but that is only tested during the last file release. That
- * could happen well after the next commit, leaving a great big
- * window where new writes may get lost if someone chooses to write
- * to this file after truncating to zero
- *
- * The inode doesn't have any dirty data here, and so if we commit
- * this is a noop. If someone immediately starts writing to the inode
- * it is very likely we'll catch some of their writes in this
- * transaction, and the commit will find this file on the ordered
- * data list with good things to send down.
- *
- * This is a best effort solution, there is still a window where
- * using truncate to replace the contents of the file will
- * end up with a zero length file after a crash.
- */
- if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
- &BTRFS_I(inode)->runtime_flags))
- btrfs_add_ordered_operation(trans, root, inode);
-
- /*
* So if we truncate and then write and fsync we normally would just
* write the extents that changed, which is a problem if we need to
* first truncate that entire inode. So set this flag so we write out
@@ -8106,7 +8097,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
mutex_init(&ei->delalloc_mutex);
btrfs_ordered_inode_tree_init(&ei->ordered_tree);
INIT_LIST_HEAD(&ei->delalloc_inodes);
- INIT_LIST_HEAD(&ei->ordered_operations);
RB_CLEAR_NODE(&ei->rb_node);
return inode;
@@ -8146,17 +8136,6 @@ void btrfs_destroy_inode(struct inode *inode)
if (!root)
goto free;
- /*
- * Make sure we're properly removed from the ordered operation
- * lists.
- */
- smp_mb();
- if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
- spin_lock(&root->fs_info->ordered_root_lock);
- list_del_init(&BTRFS_I(inode)->ordered_operations);
- spin_unlock(&root->fs_info->ordered_root_lock);
- }
-
if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags)) {
btrfs_info(root->fs_info, "inode %llu still on the orphan list",
@@ -8338,12 +8317,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
ret = 0;
/*
- * we're using rename to replace one file with another.
- * and the replacement file is large. Start IO on it now so
- * we don't add too much work to the end of the transaction
+ * we're using rename to replace one file with another. Start IO on it
+ * now so we don't add too much work to the end of the transaction
*/
- if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
- old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
+ if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
filemap_flush(old_inode->i_mapping);
/* close the racy window with snapshot create/destroy ioctl */
@@ -8391,12 +8368,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
btrfs_pin_log_trans(root);
}
- /*
- * make sure the inode gets flushed if it is replacing
- * something.
- */
- if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
- btrfs_add_ordered_operation(trans, root, old_inode);
inode_inc_iversion(old_dir);
inode_inc_iversion(new_dir);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 7187b14faa6c..963895c1f801 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -571,18 +571,6 @@ void btrfs_remove_ordered_extent(struct inode *inode,
trace_btrfs_ordered_extent_remove(inode, entry);
- /*
- * we have no more ordered extents for this inode and
- * no dirty pages. We can safely remove it from the
- * list of ordered extents
- */
- if (RB_EMPTY_ROOT(&tree->tree) &&
- !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
- spin_lock(&root->fs_info->ordered_root_lock);
- list_del_init(&BTRFS_I(inode)->ordered_operations);
- spin_unlock(&root->fs_info->ordered_root_lock);
- }
-
if (!root->nr_ordered_extents) {
spin_lock(&root->fs_info->ordered_root_lock);
BUG_ON(list_empty(&root->ordered_root));
@@ -687,81 +675,6 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
}
/*
- * this is used during transaction commit to write all the inodes
- * added to the ordered operation list. These files must be fully on
- * disk before the transaction commits.
- *
- * we have two modes here, one is to just start the IO via filemap_flush
- * and the other is to wait for all the io. When we wait, we have an
- * extra check to make sure the ordered operation list really is empty
- * before we return
- */
-int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int wait)
-{
- struct btrfs_inode *btrfs_inode;
- struct inode *inode;
- struct btrfs_transaction *cur_trans = trans->transaction;
- struct list_head splice;
- struct list_head works;
- struct btrfs_delalloc_work *work, *next;
- int ret = 0;
-
- INIT_LIST_HEAD(&splice);
- INIT_LIST_HEAD(&works);
-
- mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
- spin_lock(&root->fs_info->ordered_root_lock);
- list_splice_init(&cur_trans->ordered_operations, &splice);
- while (!list_empty(&splice)) {
- btrfs_inode = list_entry(splice.next, struct btrfs_inode,
- ordered_operations);
- inode = &btrfs_inode->vfs_inode;
-
- list_del_init(&btrfs_inode->ordered_operations);
-
- /*
- * the inode may be getting freed (in sys_unlink path).
- */
- inode = igrab(inode);
- if (!inode)
- continue;
-
- if (!wait)
- list_add_tail(&BTRFS_I(inode)->ordered_operations,
- &cur_trans->ordered_operations);
- spin_unlock(&root->fs_info->ordered_root_lock);
-
- work = btrfs_alloc_delalloc_work(inode, wait, 1);
- if (!work) {
- spin_lock(&root->fs_info->ordered_root_lock);
- if (list_empty(&BTRFS_I(inode)->ordered_operations))
- list_add_tail(&btrfs_inode->ordered_operations,
- &splice);
- list_splice_tail(&splice,
- &cur_trans->ordered_operations);
- spin_unlock(&root->fs_info->ordered_root_lock);
- ret = -ENOMEM;
- goto out;
- }
- list_add_tail(&work->list, &works);
- btrfs_queue_work(root->fs_info->flush_workers,
- &work->work);
-
- cond_resched();
- spin_lock(&root->fs_info->ordered_root_lock);
- }
- spin_unlock(&root->fs_info->ordered_root_lock);
-out:
- list_for_each_entry_safe(work, next, &works, list) {
- list_del_init(&work->list);
- btrfs_wait_and_free_delalloc_work(work);
- }
- mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
- return ret;
-}
-
-/*
* Used to start IO or wait for a given ordered extent to finish.
*
* If wait is one, this effectively waits on page writeback for all the pages
@@ -1120,42 +1033,6 @@ out:
return index;
}
-
-/*
- * add a given inode to the list of inodes that must be fully on
- * disk before a transaction commit finishes.
- *
- * This basically gives us the ext3 style data=ordered mode, and it is mostly
- * used to make sure renamed files are fully on disk.
- *
- * It is a noop if the inode is already fully on disk.
- *
- * If trans is not null, we'll do a friendly check for a transaction that
- * is already flushing things and force the IO down ourselves.
- */
-void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, struct inode *inode)
-{
- struct btrfs_transaction *cur_trans = trans->transaction;
- u64 last_mod;
-
- last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);
-
- /*
- * if this file hasn't been changed since the last transaction
- * commit, we can safely return without doing anything
- */
- if (last_mod <= root->fs_info->last_trans_committed)
- return;
-
- spin_lock(&root->fs_info->ordered_root_lock);
- if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
- list_add_tail(&BTRFS_I(inode)->ordered_operations,
- &cur_trans->ordered_operations);
- }
- spin_unlock(&root->fs_info->ordered_root_lock);
-}
-
int __init ordered_data_init(void)
{
btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 246897058efb..d81a274d621e 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -190,11 +190,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered);
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
u32 *sum, int len);
-int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int wait);
-void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct inode *inode);
int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr);
void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr);
void btrfs_get_logged_extents(struct inode *inode,
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 98cb6b2630f9..b497498484be 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1201,6 +1201,50 @@ out:
mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
+
+static int comp_oper_exist(struct btrfs_qgroup_operation *oper1,
+ struct btrfs_qgroup_operation *oper2)
+{
+ /*
+ * Ignore seq and type here; we're looking for any operation
+ * at all related to this extent on that root.
+ */
+ if (oper1->bytenr < oper2->bytenr)
+ return -1;
+ if (oper1->bytenr > oper2->bytenr)
+ return 1;
+ if (oper1->ref_root < oper2->ref_root)
+ return -1;
+ if (oper1->ref_root > oper2->ref_root)
+ return 1;
+ return 0;
+}
+
+static int qgroup_oper_exists(struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup_operation *oper)
+{
+ struct rb_node *n;
+ struct btrfs_qgroup_operation *cur;
+ int cmp;
+
+ spin_lock(&fs_info->qgroup_op_lock);
+ n = fs_info->qgroup_op_tree.rb_node;
+ while (n) {
+ cur = rb_entry(n, struct btrfs_qgroup_operation, n);
+ cmp = comp_oper_exist(cur, oper);
+ if (cmp < 0) {
+ n = n->rb_right;
+ } else if (cmp) {
+ n = n->rb_left;
+ } else {
+ spin_unlock(&fs_info->qgroup_op_lock);
+ return -EEXIST;
+ }
+ }
+ spin_unlock(&fs_info->qgroup_op_lock);
+ return 0;
+}
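
qgroup_oper_exists() above walks the rb-tree of queued operations with a comparator that deliberately ignores seq and type, so any operation on the same (bytenr, ref_root) pair counts as a hit; the SUB_SUBTREE caller then drops the duplicate instead of queueing it. Here is a hedged userspace sketch of that "partial-key lookup before enqueue" idea; the function names mirror the kernel ones, but the sorted array and binary search are invented stand-ins for the rb-tree.

#include <stdint.h>
#include <stdio.h>

struct oper { uint64_t bytenr, ref_root; int type, seq; };

/* Compare only the fields that identify the extent/root pair. */
static int comp_oper_exist(const struct oper *a, const struct oper *b)
{
	if (a->bytenr != b->bytenr)
		return a->bytenr < b->bytenr ? -1 : 1;
	if (a->ref_root != b->ref_root)
		return a->ref_root < b->ref_root ? -1 : 1;
	return 0;		/* type and seq intentionally ignored */
}

/* Return 1 if any queued operation touches the same (bytenr, ref_root). */
static int oper_exists(const struct oper *queued, int nr, const struct oper *key)
{
	int lo = 0, hi = nr - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;
		int cmp = comp_oper_exist(&queued[mid], key);

		if (cmp < 0)
			lo = mid + 1;
		else if (cmp > 0)
			hi = mid - 1;
		else
			return 1;
	}
	return 0;
}

int main(void)
{
	/* Must stay sorted by (bytenr, ref_root) for the binary search. */
	struct oper queued[] = {
		{ 4096, 5, 1, 10 }, { 8192, 5, 2, 11 }, { 8192, 7, 1, 12 },
	};
	struct oper key = { 8192, 7, 3, 99 };	/* different type/seq */

	printf("%s\n", oper_exists(queued, 3, &key) ?
	       "duplicate, drop it" : "queue it");
	return 0;
}
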
+
static int comp_oper(struct btrfs_qgroup_operation *oper1,
struct btrfs_qgroup_operation *oper2)
{
@@ -1290,6 +1334,23 @@ int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
oper->seq = atomic_inc_return(&fs_info->qgroup_op_seq);
INIT_LIST_HEAD(&oper->elem.list);
oper->elem.seq = 0;
+
+ if (type == BTRFS_QGROUP_OPER_SUB_SUBTREE) {
+ /*
+ * If any operation for this bytenr/ref_root combo
+ * exists, then we know it's not exclusively owned and
+ * shouldn't be queued up.
+ *
+ * This also catches the case where we have a cloned
+ * extent that gets queued up multiple times during
+ * drop snapshot.
+ */
+ if (qgroup_oper_exists(fs_info, oper)) {
+ kfree(oper);
+ return 0;
+ }
+ }
+
ret = insert_qgroup_oper(fs_info, oper);
if (ret) {
/* Shouldn't happen so have an assert for developers */
@@ -1884,6 +1945,111 @@ out:
}
/*
+ * Process a reference to a shared subtree. This type of operation is
+ * queued during snapshot removal when we encounter extents which are
+ * shared between more than one root.
+ */
+static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+ struct btrfs_qgroup_operation *oper)
+{
+ struct ulist *roots = NULL;
+ struct ulist_node *unode;
+ struct ulist_iterator uiter;
+ struct btrfs_qgroup_list *glist;
+ struct ulist *parents;
+ int ret = 0;
+ int err;
+ struct btrfs_qgroup *qg;
+ u64 root_obj = 0;
+ struct seq_list elem = {};
+
+ parents = ulist_alloc(GFP_NOFS);
+ if (!parents)
+ return -ENOMEM;
+
+ btrfs_get_tree_mod_seq(fs_info, &elem);
+ ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
+ elem.seq, &roots);
+ btrfs_put_tree_mod_seq(fs_info, &elem);
+ if (ret < 0)
+ return ret;
+
+ if (roots->nnodes != 1)
+ goto out;
+
+ ULIST_ITER_INIT(&uiter);
+ unode = ulist_next(roots, &uiter); /* Only want 1 so no need to loop */
+ /*
+ * If we find our ref root then that means all refs
+ * this extent has to the root have not yet been
+ * deleted. In that case, we do nothing and let the
+ * last ref for this bytenr drive our update.
+ *
+ * This can happen for example if an extent is
+ * referenced multiple times in a snapshot (clone,
+ * etc). If we are in the middle of snapshot removal,
+ * queued updates for such an extent will find the
+ * root if we have not yet finished removing the
+ * snapshot.
+ */
+ if (unode->val == oper->ref_root)
+ goto out;
+
+ root_obj = unode->val;
+ BUG_ON(!root_obj);
+
+ spin_lock(&fs_info->qgroup_lock);
+ qg = find_qgroup_rb(fs_info, root_obj);
+ if (!qg)
+ goto out_unlock;
+
+ qg->excl += oper->num_bytes;
+ qg->excl_cmpr += oper->num_bytes;
+ qgroup_dirty(fs_info, qg);
+
+ /*
+ * Adjust counts for parent groups. First we find all
+ * parents, then in the 2nd loop we do the adjustment
+ * while adding parents of the parents to our ulist.
+ */
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ err = ulist_add(parents, glist->group->qgroupid,
+ ptr_to_u64(glist->group), GFP_ATOMIC);
+ if (err < 0) {
+ ret = err;
+ goto out_unlock;
+ }
+ }
+
+ ULIST_ITER_INIT(&uiter);
+ while ((unode = ulist_next(parents, &uiter))) {
+ qg = u64_to_ptr(unode->aux);
+ qg->excl += oper->num_bytes;
+ qg->excl_cmpr += oper->num_bytes;
+ qgroup_dirty(fs_info, qg);
+
+ /* Add any parents of the parents */
+ list_for_each_entry(glist, &qg->groups, next_group) {
+ err = ulist_add(parents, glist->group->qgroupid,
+ ptr_to_u64(glist->group), GFP_ATOMIC);
+ if (err < 0) {
+ ret = err;
+ goto out_unlock;
+ }
+ }
+ }
+
+out_unlock:
+ spin_unlock(&fs_info->qgroup_lock);
+
+out:
+ ulist_free(roots);
+ ulist_free(parents);
+ return ret;
+}
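
qgroup_subtree_accounting() above credits the one remaining owner of the extent and then propagates the byte delta to every ancestor qgroup, using a ulist as a work queue so that parents of parents are each adjusted exactly once. A hedged sketch of that upward propagation over a tiny parent DAG is shown below; the parent table, the visited array (standing in for the ulist) and all names are invented for illustration, and only the excl counter is modelled.

#include <stdio.h>

#define NR_GROUPS 6

/* parent[i][j] != 0 means group j is a parent of group i (illustration). */
static const int parent[NR_GROUPS][NR_GROUPS] = {
	[0] = { [3] = 1 },		/* leaf qgroup -> level-1 group */
	[3] = { [4] = 1, [5] = 1 },	/* level-1 group -> two level-2 groups */
};

static unsigned long long excl[NR_GROUPS];

/* Add 'bytes' to 'group' and to all of its (transitive) parents, once each. */
static void propagate(int group, unsigned long long bytes)
{
	int work[NR_GROUPS], visited[NR_GROUPS] = { 0 };
	int head = 0, tail = 0, i;

	work[tail++] = group;
	visited[group] = 1;
	while (head < tail) {
		int cur = work[head++];

		excl[cur] += bytes;
		for (i = 0; i < NR_GROUPS; i++)
			if (parent[cur][i] && !visited[i]) {
				visited[i] = 1;
				work[tail++] = i;
			}
	}
}

int main(void)
{
	int i;

	propagate(0, 16384);
	for (i = 0; i < NR_GROUPS; i++)
		if (excl[i])
			printf("qgroup %d: excl %llu\n", i, excl[i]);
	return 0;
}
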
+
+/*
* btrfs_qgroup_account_ref is called for every ref that is added to or deleted
* from the fs. First, all roots referencing the extent are searched, and
* then the space is accounted accordingly to the different roots. The
@@ -1920,6 +2086,9 @@ static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
case BTRFS_QGROUP_OPER_SUB_SHARED:
ret = qgroup_shared_accounting(trans, fs_info, oper);
break;
+ case BTRFS_QGROUP_OPER_SUB_SUBTREE:
+ ret = qgroup_subtree_accounting(trans, fs_info, oper);
+ break;
default:
ASSERT(0);
}
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 5952ff1fbd7a..18cc68ca3090 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -44,6 +44,7 @@ enum btrfs_qgroup_operation_type {
BTRFS_QGROUP_OPER_ADD_SHARED,
BTRFS_QGROUP_OPER_SUB_EXCL,
BTRFS_QGROUP_OPER_SUB_SHARED,
+ BTRFS_QGROUP_OPER_SUB_SUBTREE,
};
struct btrfs_qgroup_operation {
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 67b48b9a03e0..c4124de4435b 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1665,6 +1665,21 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
return 0;
}
+/*
+ * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
+ *
+ * If there's a redundant raid level among the DATA block groups, use the respective
+ * multiplier to scale the sizes.
+ *
+ * Unused device space usage is based on simulating the chunk allocator
+ * algorithm that respects the device sizes, order of allocations and the
+ * 'alloc_start' value; this is a close approximation of the actual use, but
+ * there are other factors that may change the result (like a new metadata
+ * chunk).
+ *
+ * FIXME: not accurate for mixed block groups, total and free/used are ok,
+ * available appears slightly larger.
+ */
static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
@@ -1675,6 +1690,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
u64 total_free_data = 0;
int bits = dentry->d_sb->s_blocksize_bits;
__be32 *fsid = (__be32 *)fs_info->fsid;
+ unsigned factor = 1;
+ struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
int ret;
/* holding chunk_muext to avoid allocating new chunks */
@@ -1682,30 +1699,52 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
rcu_read_lock();
list_for_each_entry_rcu(found, head, list) {
if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
+ int i;
+
total_free_data += found->disk_total - found->disk_used;
total_free_data -=
btrfs_account_ro_block_groups_free_space(found);
+
+ for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
+ if (!list_empty(&found->block_groups[i])) {
+ switch (i) {
+ case BTRFS_RAID_DUP:
+ case BTRFS_RAID_RAID1:
+ case BTRFS_RAID_RAID10:
+ factor = 2;
+ }
+ }
+ }
}
total_used += found->disk_used;
}
+
rcu_read_unlock();
- buf->f_namelen = BTRFS_NAME_LEN;
- buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits;
- buf->f_bfree = buf->f_blocks - (total_used >> bits);
- buf->f_bsize = dentry->d_sb->s_blocksize;
- buf->f_type = BTRFS_SUPER_MAGIC;
+ buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
+ buf->f_blocks >>= bits;
+ buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);
+
+ /* Account global block reserve as used, it's in logical size already */
+ spin_lock(&block_rsv->lock);
+ buf->f_bfree -= block_rsv->size >> bits;
+ spin_unlock(&block_rsv->lock);
+
buf->f_bavail = total_free_data;
ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
if (ret) {
mutex_unlock(&fs_info->chunk_mutex);
return ret;
}
- buf->f_bavail += total_free_data;
+ buf->f_bavail += div_u64(total_free_data, factor);
buf->f_bavail = buf->f_bavail >> bits;
mutex_unlock(&fs_info->chunk_mutex);
+ buf->f_type = BTRFS_SUPER_MAGIC;
+ buf->f_bsize = dentry->d_sb->s_blocksize;
+ buf->f_namelen = BTRFS_NAME_LEN;
+
/* We treat it as constant endianness (it doesn't matter _which_)
because we want the fsid to come out the same whether mounted
on a big-endian or little-endian host */
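
The statfs comment added above describes the pessimistic 'df' math: raw byte counts are divided by the redundancy factor (2 for DUP/RAID1/RAID10) before being shifted into blocks, while the global block reserve, which is already expressed in logical bytes, is subtracted without scaling. A hedged, standalone sketch of that arithmetic follows; fill_statfs() and its parameters are invented for illustration and are not the kernel interface.

#include <stdint.h>
#include <stdio.h>

/* Pessimistic df numbers: divide raw bytes by the raid redundancy factor. */
static void fill_statfs(uint64_t total_bytes, uint64_t used_bytes,
			uint64_t reserve_bytes, unsigned factor,
			unsigned block_bits)
{
	uint64_t f_blocks = (total_bytes / factor) >> block_bits;
	uint64_t f_bfree = f_blocks - ((used_bytes / factor) >> block_bits);

	/* The global reserve is already logical size, so no factor applied. */
	f_bfree -= reserve_bytes >> block_bits;

	printf("blocks=%llu bfree=%llu\n",
	       (unsigned long long)f_blocks, (unsigned long long)f_bfree);
}

int main(void)
{
	/* e.g. RAID1 data: every logical byte occupies two raw bytes. */
	fill_statfs(2ULL << 30, 512ULL << 20, 32ULL << 20, 2, 12);
	return 0;
}
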
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 5f379affdf23..d89c6d3542ca 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -218,7 +218,6 @@ loop:
spin_lock_init(&cur_trans->delayed_refs.lock);
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
- INIT_LIST_HEAD(&cur_trans->ordered_operations);
INIT_LIST_HEAD(&cur_trans->pending_chunks);
INIT_LIST_HEAD(&cur_trans->switch_commits);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
@@ -1612,27 +1611,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
kmem_cache_free(btrfs_trans_handle_cachep, trans);
}
-static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
- struct btrfs_root *root)
-{
- int ret;
-
- ret = btrfs_run_delayed_items(trans, root);
- if (ret)
- return ret;
-
- /*
- * rename don't use btrfs_join_transaction, so, once we
- * set the transaction to blocked above, we aren't going
- * to get any new ordered operations. We can safely run
- * it here and no for sure that nothing new will be added
- * to the list
- */
- ret = btrfs_run_ordered_operations(trans, root, 1);
-
- return ret;
-}
-
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
@@ -1653,13 +1631,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_transaction *prev_trans = NULL;
int ret;
- ret = btrfs_run_ordered_operations(trans, root, 0);
- if (ret) {
- btrfs_abort_transaction(trans, root, ret);
- btrfs_end_transaction(trans, root);
- return ret;
- }
-
/* Stop the commit early if ->aborted is set */
if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
ret = cur_trans->aborted;
@@ -1740,7 +1711,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
if (ret)
goto cleanup_transaction;
- ret = btrfs_flush_all_pending_stuffs(trans, root);
+ ret = btrfs_run_delayed_items(trans, root);
if (ret)
goto cleanup_transaction;
@@ -1748,7 +1719,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
extwriter_counter_read(cur_trans) == 0);
/* some pending stuffs might be added after the previous flush. */
- ret = btrfs_flush_all_pending_stuffs(trans, root);
+ ret = btrfs_run_delayed_items(trans, root);
if (ret)
goto cleanup_transaction;
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 7dd558ed0716..579be51b27e5 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -55,7 +55,6 @@ struct btrfs_transaction {
wait_queue_head_t writer_wait;
wait_queue_head_t commit_wait;
struct list_head pending_snapshots;
- struct list_head ordered_operations;
struct list_head pending_chunks;
struct list_head switch_commits;
struct btrfs_delayed_ref_root delayed_refs;
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
index 7f78cbf5cf41..4c29db604bbe 100644
--- a/fs/btrfs/ulist.h
+++ b/fs/btrfs/ulist.h
@@ -57,6 +57,21 @@ void ulist_free(struct ulist *ulist);
int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
u64 *old_aux, gfp_t gfp_mask);
+
+/* just like ulist_add_merge() but take a pointer for the aux data */
+static inline int ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux,
+ void **old_aux, gfp_t gfp_mask)
+{
+#if BITS_PER_LONG == 32
+ u64 old64 = (uintptr_t)*old_aux;
+ int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
+ *old_aux = (void *)((uintptr_t)old64);
+ return ret;
+#else
+ return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
+#endif
+}
+
struct ulist_node *ulist_next(struct ulist *ulist,
struct ulist_iterator *uiter);
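
ulist_add_merge_ptr() exists because the ulist aux field is a u64 while callers want to store pointers: on a 32-bit build, treating a void ** as a u64 * would read and write 8 bytes where only 4 are valid, so the helper round-trips through uintptr_t instead. A hedged, standalone demonstration of that width-safe round trip (no btrfs code involved; the two helpers mirror the ptr_to_u64/u64_to_ptr idea used by the qgroup code):

#include <stdint.h>
#include <stdio.h>

/* Store a pointer in a u64 slot and get it back, safely on 32- and 64-bit. */
static uint64_t ptr_to_u64(const void *p)
{
	return (uint64_t)(uintptr_t)p;
}

static void *u64_to_ptr(uint64_t v)
{
	return (void *)(uintptr_t)v;
}

int main(void)
{
	int value = 42;
	uint64_t aux = ptr_to_u64(&value);	/* what the ulist stores */
	int *back = u64_to_ptr(aux);		/* what the caller gets out */

	printf("%d (round trip %s)\n", *back,
	       back == &value ? "ok" : "broken");
	return 0;
}
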
diff --git a/fs/locks.c b/fs/locks.c
index a6f54802d277..cb66fb05ad4a 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -247,6 +247,18 @@ void locks_free_lock(struct file_lock *fl)
}
EXPORT_SYMBOL(locks_free_lock);
+static void
+locks_dispose_list(struct list_head *dispose)
+{
+ struct file_lock *fl;
+
+ while (!list_empty(dispose)) {
+ fl = list_first_entry(dispose, struct file_lock, fl_block);
+ list_del_init(&fl->fl_block);
+ locks_free_lock(fl);
+ }
+}
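
locks_dispose_list() above supports a "collect under the spinlock, free after dropping it" pattern: locks_delete_lock() now only unlinks the lock and parks it on a local dispose list, and the caller frees the whole list once the i_lock has been released, so that any potentially blocking cleanup happens outside the spinlock. A hedged userspace model of that deferred-free shape using a singly linked list; struct lock and the function names are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct lock {
	struct lock *next;
	int id;
};

/* Phase 1 (kernel: while the i_lock spinlock is held): unlink and park. */
static void delete_lock(struct lock **pos, struct lock **dispose)
{
	struct lock *fl = *pos;

	*pos = fl->next;	/* unlink from the live list */
	fl->next = *dispose;	/* park it for later freeing */
	*dispose = fl;
}

/* Phase 2 (kernel: after the spinlock is dropped): actually free them. */
static void dispose_list(struct lock *dispose)
{
	while (dispose) {
		struct lock *fl = dispose;

		dispose = fl->next;
		printf("freeing lock %d\n", fl->id);
		free(fl);
	}
}

int main(void)
{
	struct lock *list = NULL, *dispose = NULL, **pos;
	int i;

	for (i = 0; i < 3; i++) {
		struct lock *fl = malloc(sizeof(*fl));

		fl->id = i;
		fl->next = list;
		list = fl;
	}

	/* Drop every lock with an even id while "holding the lock". */
	for (pos = &list; *pos; ) {
		if ((*pos)->id % 2 == 0)
			delete_lock(pos, &dispose);
		else
			pos = &(*pos)->next;
	}

	dispose_list(dispose);	/* blocking work happens outside the lock */

	while (list) {		/* clean up the survivors */
		struct lock *fl = list;

		list = fl->next;
		free(fl);
	}
	return 0;
}
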
+
void locks_init_lock(struct file_lock *fl)
{
memset(fl, 0, sizeof(struct file_lock));
@@ -285,7 +297,8 @@ EXPORT_SYMBOL(__locks_copy_lock);
void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
{
- locks_release_private(new);
+ /* "new" must be a freshly-initialized lock */
+ WARN_ON_ONCE(new->fl_ops);
__locks_copy_lock(new, fl);
new->fl_file = fl->fl_file;
@@ -650,12 +663,16 @@ static void locks_unlink_lock(struct file_lock **thisfl_p)
*
* Must be called with i_lock held!
*/
-static void locks_delete_lock(struct file_lock **thisfl_p)
+static void locks_delete_lock(struct file_lock **thisfl_p,
+ struct list_head *dispose)
{
struct file_lock *fl = *thisfl_p;
locks_unlink_lock(thisfl_p);
- locks_free_lock(fl);
+ if (dispose)
+ list_add(&fl->fl_block, dispose);
+ else
+ locks_free_lock(fl);
}
/* Determine if lock sys_fl blocks lock caller_fl. Common functionality
@@ -811,6 +828,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
struct inode * inode = file_inode(filp);
int error = 0;
int found = 0;
+ LIST_HEAD(dispose);
if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
new_fl = locks_alloc_lock();
@@ -833,7 +851,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
if (request->fl_type == fl->fl_type)
goto out;
found = 1;
- locks_delete_lock(before);
+ locks_delete_lock(before, &dispose);
break;
}
@@ -880,6 +898,7 @@ out:
spin_unlock(&inode->i_lock);
if (new_fl)
locks_free_lock(new_fl);
+ locks_dispose_list(&dispose);
return error;
}
@@ -893,6 +912,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
struct file_lock **before;
int error;
bool added = false;
+ LIST_HEAD(dispose);
/*
* We may need two file_lock structures for this operation,
@@ -988,7 +1008,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
else
request->fl_end = fl->fl_end;
if (added) {
- locks_delete_lock(before);
+ locks_delete_lock(before, &dispose);
continue;
}
request = fl;
@@ -1018,21 +1038,24 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
* one (This may happen several times).
*/
if (added) {
- locks_delete_lock(before);
+ locks_delete_lock(before, &dispose);
continue;
}
- /* Replace the old lock with the new one.
- * Wake up anybody waiting for the old one,
- * as the change in lock type might satisfy
- * their needs.
+ /*
+ * Replace the old lock with new_fl, and
+ * remove the old one. It's safe to do the
+ * insert here since we know that we won't be
+ * using new_fl later, and that the lock is
+ * just replacing an existing lock.
*/
- locks_wake_up_blocks(fl);
- fl->fl_start = request->fl_start;
- fl->fl_end = request->fl_end;
- fl->fl_type = request->fl_type;
- locks_release_private(fl);
- locks_copy_private(fl, request);
- request = fl;
+ error = -ENOLCK;
+ if (!new_fl)
+ goto out;
+ locks_copy_lock(new_fl, request);
+ request = new_fl;
+ new_fl = NULL;
+ locks_delete_lock(before, &dispose);
+ locks_insert_lock(before, request);
added = true;
}
}
@@ -1093,6 +1116,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
locks_free_lock(new_fl);
if (new_fl2)
locks_free_lock(new_fl2);
+ locks_dispose_list(&dispose);
return error;
}
@@ -1268,7 +1292,7 @@ int lease_modify(struct file_lock **before, int arg)
printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
fl->fl_fasync = NULL;
}
- locks_delete_lock(before);
+ locks_delete_lock(before, NULL);
}
return 0;
}
@@ -1737,13 +1761,10 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
ret = fl;
spin_lock(&inode->i_lock);
error = __vfs_setlease(filp, arg, &ret);
- if (error) {
- spin_unlock(&inode->i_lock);
- locks_free_lock(fl);
- goto out_free_fasync;
- }
- if (ret != fl)
- locks_free_lock(fl);
+ if (error)
+ goto out_unlock;
+ if (ret == fl)
+ fl = NULL;
/*
* fasync_insert_entry() returns the old entry if any.
@@ -1755,9 +1776,10 @@ static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
new = NULL;
error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
+out_unlock:
spin_unlock(&inode->i_lock);
-
-out_free_fasync:
+ if (fl)
+ locks_free_lock(fl);
if (new)
fasync_free(new);
return error;
@@ -2320,6 +2342,7 @@ void locks_remove_file(struct file *filp)
struct inode * inode = file_inode(filp);
struct file_lock *fl;
struct file_lock **before;
+ LIST_HEAD(dispose);
if (!inode->i_flock)
return;
@@ -2365,12 +2388,13 @@ void locks_remove_file(struct file *filp)
fl->fl_type, fl->fl_flags,
fl->fl_start, fl->fl_end);
- locks_delete_lock(before);
+ locks_delete_lock(before, &dispose);
continue;
}
before = &fl->fl_next;
}
spin_unlock(&inode->i_lock);
+ locks_dispose_list(&dispose);
}
/**
@@ -2452,7 +2476,11 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
seq_puts(f, "FLOCK ADVISORY ");
}
} else if (IS_LEASE(fl)) {
- seq_puts(f, "LEASE ");
+ if (fl->fl_flags & FL_DELEG)
+ seq_puts(f, "DELEG ");
+ else
+ seq_puts(f, "LEASE ");
+
if (lease_breaking(fl))
seq_puts(f, "BREAKING ");
else if (fl->fl_file)
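
The __posix_lock_file() hunk above replaces in-place mutation of an existing lock with "copy the request into the pre-allocated new_fl, insert it, and put the old lock on the dispose list", per the new comment in that hunk; if no spare lock was allocated before taking the i_lock, the code bails out with -ENOLCK rather than allocating under the spinlock. Below is a hedged sketch of that pre-allocate-then-swap pattern; struct flock_entry, replace_lock() and the -1 error code (standing in for -ENOLCK) are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

struct flock_entry {
	struct flock_entry *next;
	long start, end;
	int type;
};

/*
 * Replace *pos with a copy of 'request'.  'new_fl' was allocated by the
 * caller *before* taking the lock; if it is missing we return an error
 * instead of allocating while the lock is held.
 */
static int replace_lock(struct flock_entry **pos,
			const struct flock_entry *request,
			struct flock_entry **new_fl,
			struct flock_entry **dispose)
{
	struct flock_entry *old = *pos, *repl = *new_fl;

	if (!repl)
		return -1;	/* the kernel returns -ENOLCK here */

	*new_fl = NULL;		/* consumed; caller must not reuse it */
	repl->start = request->start;
	repl->end = request->end;
	repl->type = request->type;

	repl->next = old->next;	/* swap in place of the old entry */
	*pos = repl;
	old->next = *dispose;	/* old entry is freed after unlocking */
	*dispose = old;
	return 0;
}

int main(void)
{
	struct flock_entry *old = calloc(1, sizeof(*old));
	struct flock_entry *new_fl = calloc(1, sizeof(*new_fl));	/* pre-allocated */
	struct flock_entry *list = old, *dispose = NULL;
	struct flock_entry request = { NULL, 100, 200, 1 };

	old->start = 0;
	old->end = 300;

	/* ... spinlock held here in the kernel ... */
	if (replace_lock(&list, &request, &new_fl, &dispose))
		return 1;
	/* ... spinlock dropped; now it is safe to free ... */

	printf("live lock: %ld-%ld type %d\n",
	       list->start, list->end, list->type);
	while (dispose) {
		struct flock_entry *fl = dispose;

		dispose = fl->next;
		free(fl);
	}
	free(list);
	return 0;
}
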
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 8e6c20af11a2..e1e68da6f35c 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -194,6 +194,9 @@ static inline char *mc_event_error_type(const unsigned int err_type)
* @MEM_DDR3: DDR3 RAM
* @MEM_RDDR3: Registered DDR3 RAM
* This is a variant of the DDR3 memories.
+ * @MEM_DDR4: DDR4 RAM
+ * @MEM_RDDR4: Registered DDR4 RAM
+ * This is a variant of the DDR4 memories.
*/
enum mem_type {
MEM_EMPTY = 0,
@@ -213,6 +216,8 @@ enum mem_type {
MEM_XDR,
MEM_DDR3,
MEM_RDDR3,
+ MEM_DDR4,
+ MEM_RDDR4,
};
#define MEM_FLAG_EMPTY BIT(MEM_EMPTY)
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index 1b1dfa80d9ff..f583ff639776 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -105,6 +105,7 @@ void input_mt_report_slot_state(struct input_dev *dev,
void input_mt_report_finger_count(struct input_dev *dev, int count);
void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count);
+void input_mt_drop_unused(struct input_dev *dev);
void input_mt_sync_frame(struct input_dev *dev);
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index fd0421c6d40a..95ed9424a11a 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -527,6 +527,7 @@ enum iscsi_err {
ISCSI_ERR_XMIT_FAILED = ISCSI_ERR_BASE + 19,
ISCSI_ERR_TCP_CONN_CLOSE = ISCSI_ERR_BASE + 20,
ISCSI_ERR_SCSI_EH_SESSION_RST = ISCSI_ERR_BASE + 21,
+ ISCSI_ERR_NOP_TIMEDOUT = ISCSI_ERR_BASE + 22,
};
/*
diff --git a/sound/oss/uart401.c b/sound/oss/uart401.c
index 62b8869f5a4c..279bc565ac7e 100644
--- a/sound/oss/uart401.c
+++ b/sound/oss/uart401.c
@@ -30,7 +30,7 @@
#include "mpu401.h"
-typedef struct uart401_devc
+struct uart401_devc
{
int base;
int irq;
@@ -41,14 +41,13 @@ typedef struct uart401_devc
int my_dev;
int share_irq;
spinlock_t lock;
-}
-uart401_devc;
+};
#define DATAPORT (devc->base)
#define COMDPORT (devc->base+1)
#define STATPORT (devc->base+1)
-static int uart401_status(uart401_devc * devc)
+static int uart401_status(struct uart401_devc *devc)
{
return inb(STATPORT);
}
@@ -56,17 +55,17 @@ static int uart401_status(uart401_devc * devc)
#define input_avail(devc) (!(uart401_status(devc)&INPUT_AVAIL))
#define output_ready(devc) (!(uart401_status(devc)&OUTPUT_READY))
-static void uart401_cmd(uart401_devc * devc, unsigned char cmd)
+static void uart401_cmd(struct uart401_devc *devc, unsigned char cmd)
{
outb((cmd), COMDPORT);
}
-static int uart401_read(uart401_devc * devc)
+static int uart401_read(struct uart401_devc *devc)
{
return inb(DATAPORT);
}
-static void uart401_write(uart401_devc * devc, unsigned char byte)
+static void uart401_write(struct uart401_devc *devc, unsigned char byte)
{
outb((byte), DATAPORT);
}
@@ -77,10 +76,10 @@ static void uart401_write(uart401_devc * devc, unsigned char byte)
#define MPU_RESET 0xFF
#define UART_MODE_ON 0x3F
-static int reset_uart401(uart401_devc * devc);
-static void enter_uart_mode(uart401_devc * devc);
+static int reset_uart401(struct uart401_devc *devc);
+static void enter_uart_mode(struct uart401_devc *devc);
-static void uart401_input_loop(uart401_devc * devc)
+static void uart401_input_loop(struct uart401_devc *devc)
{
int work_limit=30000;
@@ -99,7 +98,7 @@ static void uart401_input_loop(uart401_devc * devc)
irqreturn_t uart401intr(int irq, void *dev_id)
{
- uart401_devc *devc = dev_id;
+ struct uart401_devc *devc = dev_id;
if (devc == NULL)
{
@@ -118,7 +117,8 @@ uart401_open(int dev, int mode,
void (*output) (int dev)
)
{
- uart401_devc *devc = (uart401_devc *) midi_devs[dev]->devc;
+ struct uart401_devc *devc = (struct uart401_devc *)
+ midi_devs[dev]->devc;
if (devc->opened)
return -EBUSY;
@@ -138,7 +138,8 @@ uart401_open(int dev, int mode,
static void uart401_close(int dev)
{
- uart401_devc *devc = (uart401_devc *) midi_devs[dev]->devc;
+ struct uart401_devc *devc = (struct uart401_devc *)
+ midi_devs[dev]->devc;
reset_uart401(devc);
devc->opened = 0;
@@ -148,7 +149,8 @@ static int uart401_out(int dev, unsigned char midi_byte)
{
int timeout;
unsigned long flags;
- uart401_devc *devc = (uart401_devc *) midi_devs[dev]->devc;
+ struct uart401_devc *devc = (struct uart401_devc *)
+ midi_devs[dev]->devc;
if (devc->disabled)
return 1;
@@ -219,7 +221,7 @@ static const struct midi_operations uart401_operations =
.buffer_status = uart401_buffer_status,
};
-static void enter_uart_mode(uart401_devc * devc)
+static void enter_uart_mode(struct uart401_devc *devc)
{
int ok, timeout;
unsigned long flags;
@@ -241,7 +243,7 @@ static void enter_uart_mode(uart401_devc * devc)
spin_unlock_irqrestore(&devc->lock,flags);
}
-static int reset_uart401(uart401_devc * devc)
+static int reset_uart401(struct uart401_devc *devc)
{
int ok, timeout, n;
@@ -285,7 +287,7 @@ static int reset_uart401(uart401_devc * devc)
int probe_uart401(struct address_info *hw_config, struct module *owner)
{
- uart401_devc *devc;
+ struct uart401_devc *devc;
char *name = "MPU-401 (UART) MIDI";
int ok = 0;
unsigned long flags;
@@ -300,7 +302,7 @@ int probe_uart401(struct address_info *hw_config, struct module *owner)
return 0;
}
- devc = kmalloc(sizeof(uart401_devc), GFP_KERNEL);
+ devc = kmalloc(sizeof(struct uart401_devc), GFP_KERNEL);
if (!devc) {
printk(KERN_WARNING "uart401: Can't allocate memory\n");
goto cleanup_region;
@@ -392,7 +394,7 @@ cleanup_region:
void unload_uart401(struct address_info *hw_config)
{
- uart401_devc *devc;
+ struct uart401_devc *devc;
int n=hw_config->slots[4];
/* Not set up */
diff --git a/sound/oss/waveartist.c b/sound/oss/waveartist.c
index 672af8b56542..b36ea47527e8 100644
--- a/sound/oss/waveartist.c
+++ b/sound/oss/waveartist.c
@@ -92,7 +92,7 @@ static unsigned short levels[SOUND_MIXER_NRDEVICES] = {
0x0000 /* Monitor */
};
-typedef struct {
+struct wavnc_info {
struct address_info hw; /* hardware */
char *chip_name;
@@ -119,7 +119,7 @@ typedef struct {
unsigned int line_mute_state :1;/* set by ioctl or autoselect */
unsigned int use_slider :1;/* use slider setting for o/p vol */
#endif
-} wavnc_info;
+};
/*
* This is the implementation specific mixer information.
@@ -129,29 +129,30 @@ struct waveartist_mixer_info {
unsigned int recording_devs; /* Recordable devies */
unsigned int stereo_devs; /* Stereo devices */
- unsigned int (*select_input)(wavnc_info *, unsigned int,
+ unsigned int (*select_input)(struct wavnc_info *, unsigned int,
unsigned char *, unsigned char *);
- int (*decode_mixer)(wavnc_info *, int,
+ int (*decode_mixer)(struct wavnc_info *, int,
unsigned char, unsigned char);
- int (*get_mixer)(wavnc_info *, int);
+ int (*get_mixer)(struct wavnc_info *, int);
};
-typedef struct wavnc_port_info {
+struct wavnc_port_info {
int open_mode;
int speed;
int channels;
int audio_format;
-} wavnc_port_info;
+};
static int nr_waveartist_devs;
-static wavnc_info adev_info[MAX_AUDIO_DEV];
+static struct wavnc_info adev_info[MAX_AUDIO_DEV];
static DEFINE_SPINLOCK(waveartist_lock);
#ifndef CONFIG_ARCH_NETWINDER
#define machine_is_netwinder() 0
#else
static struct timer_list vnc_timer;
-static void vnc_configure_mixer(wavnc_info *devc, unsigned int input_mask);
+static void vnc_configure_mixer(struct wavnc_info *devc,
+ unsigned int input_mask);
static int vnc_private_ioctl(int dev, unsigned int cmd, int __user *arg);
static void vnc_slider_tick(unsigned long data);
#endif
@@ -169,7 +170,7 @@ waveartist_set_ctlr(struct address_info *hw, unsigned char clear, unsigned char
/* Toggle IRQ acknowledge line
*/
static inline void
-waveartist_iack(wavnc_info *devc)
+waveartist_iack(struct wavnc_info *devc)
{
unsigned int ctlr_port = devc->hw.io_base + CTLR;
int old_ctlr;
@@ -188,7 +189,7 @@ waveartist_sleep(int timeout_ms)
}
static int
-waveartist_reset(wavnc_info *devc)
+waveartist_reset(struct wavnc_info *devc)
{
struct address_info *hw = &devc->hw;
unsigned int timeout, res = -1;
@@ -223,7 +224,7 @@ waveartist_reset(wavnc_info *devc)
* and can send or receive multiple words.
*/
static int
-waveartist_cmd(wavnc_info *devc,
+waveartist_cmd(struct wavnc_info *devc,
int nr_cmd, unsigned int *cmd,
int nr_resp, unsigned int *resp)
{
@@ -299,7 +300,7 @@ waveartist_cmd(wavnc_info *devc,
* Send one command word
*/
static inline int
-waveartist_cmd1(wavnc_info *devc, unsigned int cmd)
+waveartist_cmd1(struct wavnc_info *devc, unsigned int cmd)
{
return waveartist_cmd(devc, 1, &cmd, 0, NULL);
}
@@ -308,7 +309,7 @@ waveartist_cmd1(wavnc_info *devc, unsigned int cmd)
* Send one command, receive one word
*/
static inline unsigned int
-waveartist_cmd1_r(wavnc_info *devc, unsigned int cmd)
+waveartist_cmd1_r(struct wavnc_info *devc, unsigned int cmd)
{
unsigned int ret;
@@ -322,7 +323,7 @@ waveartist_cmd1_r(wavnc_info *devc, unsigned int cmd)
* word (and throw it away)
*/
static inline int
-waveartist_cmd2(wavnc_info *devc, unsigned int cmd, unsigned int arg)
+waveartist_cmd2(struct wavnc_info *devc, unsigned int cmd, unsigned int arg)
{
unsigned int vals[2];
@@ -336,7 +337,7 @@ waveartist_cmd2(wavnc_info *devc, unsigned int cmd, unsigned int arg)
* Send a triple command
*/
static inline int
-waveartist_cmd3(wavnc_info *devc, unsigned int cmd,
+waveartist_cmd3(struct wavnc_info *devc, unsigned int cmd,
unsigned int arg1, unsigned int arg2)
{
unsigned int vals[3];
@@ -349,7 +350,7 @@ waveartist_cmd3(wavnc_info *devc, unsigned int cmd,
}
static int
-waveartist_getrev(wavnc_info *devc, char *rev)
+waveartist_getrev(struct wavnc_info *devc, char *rev)
{
unsigned int temp[2];
unsigned int cmd = WACMD_GETREV;
@@ -371,15 +372,15 @@ static void waveartist_trigger(int dev, int state);
static int
waveartist_open(int dev, int mode)
{
- wavnc_info *devc;
- wavnc_port_info *portc;
+ struct wavnc_info *devc;
+ struct wavnc_port_info *portc;
unsigned long flags;
if (dev < 0 || dev >= num_audiodevs)
return -ENXIO;
- devc = (wavnc_info *) audio_devs[dev]->devc;
- portc = (wavnc_port_info *) audio_devs[dev]->portc;
+ devc = (struct wavnc_info *) audio_devs[dev]->devc;
+ portc = (struct wavnc_port_info *) audio_devs[dev]->portc;
spin_lock_irqsave(&waveartist_lock, flags);
if (portc->open_mode || (devc->open_mode & mode)) {
@@ -404,8 +405,10 @@ waveartist_open(int dev, int mode)
static void
waveartist_close(int dev)
{
- wavnc_info *devc = (wavnc_info *) audio_devs[dev]->devc;
- wavnc_port_info *portc = (wavnc_port_info *) audio_devs[dev]->portc;
+ struct wavnc_info *devc = (struct wavnc_info *)
+ audio_devs[dev]->devc;
+ struct wavnc_port_info *portc = (struct wavnc_port_info *)
+ audio_devs[dev]->portc;
unsigned long flags;
spin_lock_irqsave(&waveartist_lock, flags);
@@ -422,8 +425,10 @@ waveartist_close(int dev)
static void
waveartist_output_block(int dev, unsigned long buf, int __count, int intrflag)
{
- wavnc_port_info *portc = (wavnc_port_info *) audio_devs[dev]->portc;
- wavnc_info *devc = (wavnc_info *) audio_devs[dev]->devc;
+ struct wavnc_port_info *portc = (struct wavnc_port_info *)
+ audio_devs[dev]->portc;
+ struct wavnc_info *devc = (struct wavnc_info *)
+ audio_devs[dev]->devc;
unsigned long flags;
unsigned int count = __count;
@@ -467,8 +472,10 @@ waveartist_output_block(int dev, unsigned long buf, int __count, int intrflag)
static void
waveartist_start_input(int dev, unsigned long buf, int __count, int intrflag)
{
- wavnc_port_info *portc = (wavnc_port_info *) audio_devs[dev]->portc;
- wavnc_info *devc = (wavnc_info *) audio_devs[dev]->devc;
+ struct wavnc_port_info *portc = (struct wavnc_port_info *)
+ audio_devs[dev]->portc;
+ struct wavnc_info *devc = (struct wavnc_info *)
+ audio_devs[dev]->devc;
unsigned long flags;
unsigned int count = __count;
@@ -514,7 +521,7 @@ waveartist_ioctl(int dev, unsigned int cmd, void __user * arg)
}
static unsigned int
-waveartist_get_speed(wavnc_port_info *portc)
+waveartist_get_speed(struct wavnc_port_info *portc)
{
unsigned int speed;
@@ -542,7 +549,7 @@ waveartist_get_speed(wavnc_port_info *portc)
}
static unsigned int
-waveartist_get_bits(wavnc_port_info *portc)
+waveartist_get_bits(struct wavnc_port_info *portc)
{
unsigned int bits;
@@ -560,8 +567,10 @@ static int
waveartist_prepare_for_input(int dev, int bsize, int bcount)
{
unsigned long flags;
- wavnc_info *devc = (wavnc_info *) audio_devs[dev]->devc;
- wavnc_port_info *portc = (wavnc_port_info *) audio_devs[dev]->portc;
+ struct wavnc_info *devc = (struct wavnc_info *)
+ audio_devs[dev]->devc;
+ struct wavnc_port_info *portc = (struct wavnc_port_info *)
+ audio_devs[dev]->portc;
unsigned int speed, bits;
if (devc->audio_mode)
@@ -615,8 +624,10 @@ static int
waveartist_prepare_for_output(int dev, int bsize, int bcount)
{
unsigned long flags;
- wavnc_info *devc = (wavnc_info *) audio_devs[dev]->devc;
- wavnc_port_info *portc = (wavnc_port_info *) audio_devs[dev]->portc;
+ struct wavnc_info *devc = (struct wavnc_info *)
+ audio_devs[dev]->devc;
+ struct wavnc_port_info *portc = (struct wavnc_port_info *)
+ audio_devs[dev]->portc;
unsigned int speed, bits;
/*
@@ -660,8 +671,9 @@ waveartist_prepare_for_output(int dev, int bsize, int bcount)
static void
waveartist_halt(int dev)
{
- wavnc_port_info *portc = (wavnc_port_info *) audio_devs[dev]->portc;
- wavnc_info *devc;
+ struct wavnc_port_info *portc = (struct wavnc_port_info *)
+ audio_devs[dev]->portc;
+ struct wavnc_info *devc;
if (portc->open_mode & OPEN_WRITE)
waveartist_halt_output(dev);
@@ -669,14 +681,15 @@ waveartist_halt(int dev)
if (portc->open_mode & OPEN_READ)
waveartist_halt_input(dev);
- devc = (wavnc_info *) audio_devs[dev]->devc;
+ devc = (struct wavnc_info *) audio_devs[dev]->devc;
devc->audio_mode = 0;
}
static void
waveartist_halt_input(int dev)
{
- wavnc_info *devc = (wavnc_info *) audio_devs[dev]->devc;
+ struct wavnc_info *devc = (struct wavnc_info *)
+ audio_devs[dev]->devc;
unsigned long flags;
spin_lock_irqsave(&waveartist_lock, flags);
@@ -703,7 +716,8 @@ waveartist_halt_input(int dev)
static void
waveartist_halt_output(int dev)
{
- wavnc_info *devc = (wavnc_info *) audio_devs[dev]->devc;
+ struct wavnc_info *devc = (struct wavnc_info *)
+ audio_devs[dev]->devc;
unsigned long flags;
spin_lock_irqsave(&waveartist_lock, flags);
@@ -727,8 +741,10 @@ waveartist_halt_output(int dev)
static void
waveartist_trigger(int dev, int state)
{
- wavnc_info *devc = (wavnc_info *) audio_devs[dev]->devc;
- wavnc_port_info *portc = (wavnc_port_info *) audio_devs[dev]->portc;
+ struct wavnc_info *devc = (struct wavnc_info *)
+ audio_devs[dev]->devc;
+ struct wavnc_port_info *portc = (struct wavnc_port_info *)
+ audio_devs[dev]->portc;
unsigned long flags;
if (debug_flg & DEBUG_TRIGGER) {
@@ -764,7 +780,8 @@ waveartist_trigger(int dev, int state)
static int
waveartist_set_speed(int dev, int arg)
{
- wavnc_port_info *portc = (wavnc_port_info *) audio_devs[dev]->portc;
+ struct wavnc_port_info *portc = (struct wavnc_port_info *)
+ audio_devs[dev]->portc;
if (arg <= 0)
return portc->speed;
@@ -782,7 +799,8 @@ waveartist_set_speed(int dev, int arg)
static short
waveartist_set_channels(int dev, short arg)
{
- wavnc_port_info *portc = (wavnc_port_info *) audio_devs[dev]->portc;
+ struct wavnc_port_info *portc = (struct wavnc_port_info *)
+ audio_devs[dev]->portc;
if (arg != 1 && arg != 2)
return portc->channels;
@@ -794,7 +812,8 @@ waveartist_set_channels(int dev, short arg)
static unsigned int
waveartist_set_bits(int dev, unsigned int arg)
{
- wavnc_port_info *portc = (wavnc_port_info *) audio_devs[dev]->portc;
+ struct wavnc_port_info *portc = (struct wavnc_port_info *)
+ audio_devs[dev]->portc;
if (arg == 0)
return portc->audio_format;
@@ -829,7 +848,7 @@ static struct audio_driver waveartist_audio_driver = {
static irqreturn_t
waveartist_intr(int irq, void *dev_id)
{
- wavnc_info *devc = dev_id;
+ struct wavnc_info *devc = dev_id;
int irqstatus, status;
spin_lock(&waveartist_lock);
@@ -912,7 +931,7 @@ static const struct mix_ent mix_devs[SOUND_MIXER_NRDEVICES] = {
};
static void
-waveartist_mixer_update(wavnc_info *devc, int whichDev)
+waveartist_mixer_update(struct wavnc_info *devc, int whichDev)
{
unsigned int lev_left, lev_right;
@@ -973,7 +992,8 @@ waveartist_mixer_update(wavnc_info *devc, int whichDev)
* relevant *_select_input function has done that for us.
*/
static void
-waveartist_set_adc_mux(wavnc_info *devc, char left_dev, char right_dev)
+waveartist_set_adc_mux(struct wavnc_info *devc, char left_dev,
+ char right_dev)
{
unsigned int reg_08, reg_09;
@@ -996,7 +1016,7 @@ waveartist_set_adc_mux(wavnc_info *devc, char left_dev, char right_dev)
* SOUND_MASK_MIC Mic Microphone
*/
static unsigned int
-waveartist_select_input(wavnc_info *devc, unsigned int recmask,
+waveartist_select_input(struct wavnc_info *devc, unsigned int recmask,
unsigned char *dev_l, unsigned char *dev_r)
{
unsigned int recdev = ADC_MUX_NONE;
@@ -1024,7 +1044,8 @@ waveartist_select_input(wavnc_info *devc, unsigned int recmask,
}
static int
-waveartist_decode_mixer(wavnc_info *devc, int dev, unsigned char lev_l,
+waveartist_decode_mixer(struct wavnc_info *devc, int dev,
+ unsigned char lev_l,
unsigned char lev_r)
{
switch (dev) {
@@ -1050,7 +1071,7 @@ waveartist_decode_mixer(wavnc_info *devc, int dev, unsigned char lev_l,
return dev;
}
-static int waveartist_get_mixer(wavnc_info *devc, int dev)
+static int waveartist_get_mixer(struct wavnc_info *devc, int dev)
{
return devc->levels[dev];
}
@@ -1068,7 +1089,7 @@ static const struct waveartist_mixer_info waveartist_mixer = {
};
static void
-waveartist_set_recmask(wavnc_info *devc, unsigned int recmask)
+waveartist_set_recmask(struct wavnc_info *devc, unsigned int recmask)
{
unsigned char dev_l, dev_r;
@@ -1092,7 +1113,7 @@ waveartist_set_recmask(wavnc_info *devc, unsigned int recmask)
}
static int
-waveartist_set_mixer(wavnc_info *devc, int dev, unsigned int level)
+waveartist_set_mixer(struct wavnc_info *devc, int dev, unsigned int level)
{
unsigned int lev_left = level & 0x00ff;
unsigned int lev_right = (level & 0xff00) >> 8;
@@ -1120,7 +1141,7 @@ waveartist_set_mixer(wavnc_info *devc, int dev, unsigned int level)
static int
waveartist_mixer_ioctl(int dev, unsigned int cmd, void __user * arg)
{
- wavnc_info *devc = (wavnc_info *)audio_devs[dev]->devc;
+ struct wavnc_info *devc = (struct wavnc_info *)audio_devs[dev]->devc;
int ret = 0, val, nr;
/*
@@ -1204,7 +1225,7 @@ static struct mixer_operations waveartist_mixer_operations =
};
static void
-waveartist_mixer_reset(wavnc_info *devc)
+waveartist_mixer_reset(struct wavnc_info *devc)
{
int i;
@@ -1241,9 +1262,9 @@ waveartist_mixer_reset(wavnc_info *devc)
waveartist_mixer_update(devc, i);
}
-static int __init waveartist_init(wavnc_info *devc)
+static int __init waveartist_init(struct wavnc_info *devc)
{
- wavnc_port_info *portc;
+ struct wavnc_port_info *portc;
char rev[3], dev_name[64];
int my_dev;
@@ -1261,7 +1282,7 @@ static int __init waveartist_init(wavnc_info *devc)
conf_printf2(dev_name, devc->hw.io_base, devc->hw.irq,
devc->hw.dma, devc->hw.dma2);
- portc = kzalloc(sizeof(wavnc_port_info), GFP_KERNEL);
+ portc = kzalloc(sizeof(struct wavnc_port_info), GFP_KERNEL);
if (portc == NULL)
goto nomem;
@@ -1330,7 +1351,7 @@ nomem:
static int __init probe_waveartist(struct address_info *hw_config)
{
- wavnc_info *devc = &adev_info[nr_waveartist_devs];
+ struct wavnc_info *devc = &adev_info[nr_waveartist_devs];
if (nr_waveartist_devs >= MAX_AUDIO_DEV) {
printk(KERN_WARNING "waveartist: too many audio devices\n");
@@ -1367,7 +1388,7 @@ static int __init probe_waveartist(struct address_info *hw_config)
static void __init
attach_waveartist(struct address_info *hw, const struct waveartist_mixer_info *mix)
{
- wavnc_info *devc = &adev_info[nr_waveartist_devs];
+ struct wavnc_info *devc = &adev_info[nr_waveartist_devs];
/*
* NOTE! If irq < 0, there is another driver which has allocated the
@@ -1410,7 +1431,7 @@ attach_waveartist(struct address_info *hw, const struct waveartist_mixer_info *m
static void __exit unload_waveartist(struct address_info *hw)
{
- wavnc_info *devc = NULL;
+ struct wavnc_info *devc = NULL;
int i;
for (i = 0; i < nr_waveartist_devs; i++)
@@ -1478,7 +1499,7 @@ static void __exit unload_waveartist(struct address_info *hw)
#define VNC_DISABLE_AUTOSWITCH 0x80
static inline void
-vnc_mute_spkr(wavnc_info *devc)
+vnc_mute_spkr(struct wavnc_info *devc)
{
unsigned long flags;
@@ -1488,7 +1509,7 @@ vnc_mute_spkr(wavnc_info *devc)
}
static void
-vnc_mute_lout(wavnc_info *devc)
+vnc_mute_lout(struct wavnc_info *devc)
{
unsigned int left, right;
@@ -1507,7 +1528,7 @@ vnc_mute_lout(wavnc_info *devc)
}
static int
-vnc_volume_slider(wavnc_info *devc)
+vnc_volume_slider(struct wavnc_info *devc)
{
static signed int old_slider_volume;
unsigned long flags;
@@ -1567,7 +1588,7 @@ vnc_volume_slider(wavnc_info *devc)
* SOUND_MASK_MIC Right Mic Builtin microphone
*/
static unsigned int
-netwinder_select_input(wavnc_info *devc, unsigned int recmask,
+netwinder_select_input(struct wavnc_info *devc, unsigned int recmask,
unsigned char *dev_l, unsigned char *dev_r)
{
unsigned int recdev_l = ADC_MUX_NONE, recdev_r = ADC_MUX_NONE;
@@ -1604,7 +1625,7 @@ netwinder_select_input(wavnc_info *devc, unsigned int recmask,
}
static int
-netwinder_decode_mixer(wavnc_info *devc, int dev, unsigned char lev_l,
+netwinder_decode_mixer(struct wavnc_info *devc, int dev, unsigned char lev_l,
unsigned char lev_r)
{
switch (dev) {
@@ -1643,7 +1664,7 @@ netwinder_decode_mixer(wavnc_info *devc, int dev, unsigned char lev_l,
return dev;
}
-static int netwinder_get_mixer(wavnc_info *devc, int dev)
+static int netwinder_get_mixer(struct wavnc_info *devc, int dev)
{
int levels;
@@ -1703,7 +1724,7 @@ static const struct waveartist_mixer_info netwinder_mixer = {
};
static void
-vnc_configure_mixer(wavnc_info *devc, unsigned int recmask)
+vnc_configure_mixer(struct wavnc_info *devc, unsigned int recmask)
{
if (!devc->no_autoselect) {
if (devc->handset_detect) {
@@ -1729,7 +1750,7 @@ vnc_configure_mixer(wavnc_info *devc, unsigned int recmask)
}
static int
-vnc_slider(wavnc_info *devc)
+vnc_slider(struct wavnc_info *devc)
{
signed int slider_volume;
unsigned int temp, old_hs, old_td;
@@ -1795,7 +1816,7 @@ vnc_slider_tick(unsigned long data)
static int
vnc_private_ioctl(int dev, unsigned int cmd, int __user * arg)
{
- wavnc_info *devc = (wavnc_info *)audio_devs[dev]->devc;
+ struct wavnc_info *devc = (struct wavnc_info *)audio_devs[dev]->devc;
int val;
switch (cmd) {
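The sound/oss/waveartist.c hunks above are one mechanical conversion: the old typedef names wavnc_info and wavnc_port_info become explicit struct tags, with over-long lines re-wrapped. A minimal before/after sketch of the pattern (illustrative only, not an additional hunk):

	/* before: typedef'd type names */
	wavnc_info *devc = (wavnc_info *) audio_devs[dev]->devc;
	wavnc_port_info *portc = (wavnc_port_info *) audio_devs[dev]->portc;

	/* after: explicit struct tags, long lines wrapped */
	struct wavnc_info *devc = (struct wavnc_info *)
					audio_devs[dev]->devc;
	struct wavnc_port_info *portc = (struct wavnc_port_info *)
					audio_devs[dev]->portc;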
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 5db1948699d8..aa302fb03fc5 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -265,6 +265,7 @@ enum {
AZX_DRIVER_TERA,
AZX_DRIVER_CTX,
AZX_DRIVER_CTHDA,
+ AZX_DRIVER_CMEDIA,
AZX_DRIVER_GENERIC,
AZX_NUM_DRIVERS, /* keep this as last entry */
};
@@ -330,6 +331,7 @@ static char *driver_short_names[] = {
[AZX_DRIVER_TERA] = "HDA Teradici",
[AZX_DRIVER_CTX] = "HDA Creative",
[AZX_DRIVER_CTHDA] = "HDA Creative",
+ [AZX_DRIVER_CMEDIA] = "HDA C-Media",
[AZX_DRIVER_GENERIC] = "HD-Audio Generic",
};
@@ -1373,6 +1375,7 @@ static void azx_check_snoop_available(struct azx *chip)
snoop = false;
break;
case AZX_DRIVER_CTHDA:
+ case AZX_DRIVER_CMEDIA:
snoop = false;
break;
}
@@ -2154,6 +2157,10 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
#endif
+ /* CM8888 */
+ { PCI_DEVICE(0x13f6, 0x5011),
+ .driver_data = AZX_DRIVER_CMEDIA |
+ AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB },
/* Vortex86MX */
{ PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
/* VMware HDAudio */
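The CM8888 PCI entry above follows the driver's usual encoding: the low byte of .driver_data selects the AZX_DRIVER_* type and the upper bits carry AZX_DCAPS_* capability flags. A hedged sketch of how such an entry is decoded at probe time (the decoding is assumed from this driver's conventions; it is not part of this patch):

	/* sketch: split driver_data back into type and capability flags */
	unsigned long driver_caps = pci_id->driver_data;

	chip->driver_caps = driver_caps;
	chip->driver_type = driver_caps & 0xff;		/* AZX_DRIVER_CMEDIA */
	if (driver_caps & AZX_DCAPS_NO_MSI)		/* CM8888: no MSI   */
		chip->msi = 0;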
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 4f3aba78f720..5d8455e2dacd 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -4376,6 +4376,9 @@ static void ca0132_download_dsp(struct hda_codec *codec)
return; /* NOP */
#endif
+ if (spec->dsp_state == DSP_DOWNLOAD_FAILED)
+ return; /* don't retry failures */
+
chipio_enable_clocks(codec);
spec->dsp_state = DSP_DOWNLOADING;
if (!ca0132_download_dsp_images(codec))
@@ -4552,7 +4555,8 @@ static int ca0132_init(struct hda_codec *codec)
struct auto_pin_cfg *cfg = &spec->autocfg;
int i;
- spec->dsp_state = DSP_DOWNLOAD_INIT;
+ if (spec->dsp_state != DSP_DOWNLOAD_FAILED)
+ spec->dsp_state = DSP_DOWNLOAD_INIT;
spec->curr_chip_addx = INVALID_CHIP_ADDRESS;
snd_hda_power_up(codec);
@@ -4663,6 +4667,7 @@ static int patch_ca0132(struct hda_codec *codec)
codec->spec = spec;
spec->codec = codec;
+ spec->dsp_state = DSP_DOWNLOAD_INIT;
spec->num_mixers = 1;
spec->mixers[0] = ca0132_mixer;
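Taken together, the three ca0132 hunks latch DSP download failures: patch_ca0132() seeds dsp_state once, ca0132_init() no longer resets it when it reads DSP_DOWNLOAD_FAILED, and ca0132_download_dsp() returns early in that state. A condensed sketch of the download path after the change (the DSP_DOWNLOADED success state is assumed; it does not appear in these hunks):

	if (spec->dsp_state == DSP_DOWNLOAD_FAILED)
		return;					/* don't retry failures */

	chipio_enable_clocks(codec);
	spec->dsp_state = DSP_DOWNLOADING;
	if (!ca0132_download_dsp_images(codec))
		spec->dsp_state = DSP_DOWNLOAD_FAILED;	/* stays failed from now on */
	else
		spec->dsp_state = DSP_DOWNLOADED;	/* assumed success state */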
diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c
index ed3d133ffbb6..c895a8f21192 100644
--- a/sound/pci/hda/patch_cmedia.c
+++ b/sound/pci/hda/patch_cmedia.c
@@ -75,15 +75,62 @@ static int patch_cmi9880(struct hda_codec *codec)
return err;
}
+static int patch_cmi8888(struct hda_codec *codec)
+{
+ struct cmi_spec *spec;
+ struct auto_pin_cfg *cfg;
+ int err;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ codec->spec = spec;
+ cfg = &spec->gen.autocfg;
+ snd_hda_gen_spec_init(&spec->gen);
+
+ /* mask NID 0x10 from the playback volume selection;
+ * it's a headphone boost volume handled manually below
+ */
+ spec->gen.out_vol_mask = (1ULL << 0x10);
+
+ err = snd_hda_parse_pin_defcfg(codec, cfg, NULL, 0);
+ if (err < 0)
+ goto error;
+ err = snd_hda_gen_parse_auto_config(codec, cfg);
+ if (err < 0)
+ goto error;
+
+ if (get_defcfg_device(snd_hda_codec_get_pincfg(codec, 0x10)) ==
+ AC_JACK_HP_OUT) {
+ static const struct snd_kcontrol_new amp_kctl =
+ HDA_CODEC_VOLUME("Headphone Amp Playback Volume",
+ 0x10, 0, HDA_OUTPUT);
+ if (!snd_hda_gen_add_kctl(&spec->gen, NULL, &amp_kctl)) {
+ err = -ENOMEM;
+ goto error;
+ }
+ }
+
+ codec->patch_ops = cmi_auto_patch_ops;
+ return 0;
+
+ error:
+ snd_hda_gen_free(codec);
+ return err;
+}
+
/*
* patch entries
*/
static const struct hda_codec_preset snd_hda_preset_cmedia[] = {
+ { .id = 0x13f68888, .name = "CMI8888", .patch = patch_cmi8888 },
{ .id = 0x13f69880, .name = "CMI9880", .patch = patch_cmi9880 },
{ .id = 0x434d4980, .name = "CMI9880", .patch = patch_cmi9880 },
{} /* terminator */
};
+MODULE_ALIAS("snd-hda-codec-id:13f68888");
MODULE_ALIAS("snd-hda-codec-id:13f69880");
MODULE_ALIAS("snd-hda-codec-id:434d4980");
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 7627a69ca6d7..6f2fa838b635 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <sound/core.h>
#include <sound/jack.h>
+#include <sound/tlv.h>
#include "hda_codec.h"
#include "hda_local.h"
@@ -859,6 +860,11 @@ static int patch_conexant_auto(struct hda_codec *codec)
if (err < 0)
goto error;
+ if (codec->vendor_id == 0x14f15051) {
+ /* minimum value is actually mute */
+ spec->gen.vmaster_tlv[3] |= TLV_DB_SCALE_MUTE;
+ }
+
codec->patch_ops = cx_auto_patch_ops;
/* Some laptops with Conexant chips show stalls in S3 resume,
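The one-liner for vendor ID 0x14f15051 relies on the DB_SCALE TLV layout: word 0 is the type, word 1 the payload length, word 2 the minimum volume in 0.01 dB units, and word 3 the step size, where TLV_DB_SCALE_MUTE marks the minimum value as mute. A hedged sketch of an equivalent static definition (the dB values are illustrative; the patch instead ORs the flag into the already-built vmaster TLV at runtime):

	static const unsigned int vmaster_tlv[4] = {
		SNDRV_CTL_TLVT_DB_SCALE,	/* [0] type                     */
		2 * sizeof(unsigned int),	/* [1] payload length           */
		-7400,				/* [2] min: -74.00 dB (example) */
		100 | TLV_DB_SCALE_MUTE,	/* [3] 1.00 dB step, min = mute */
	};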
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 654c8f16d150..6b38ec3c6e57 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2782,6 +2782,27 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
return alc_parse_auto_config(codec, alc269_ignore, ssids);
}
+static int find_ext_mic_pin(struct hda_codec *codec);
+
+static void alc286_shutup(struct hda_codec *codec)
+{
+ int i;
+ int mic_pin = find_ext_mic_pin(codec);
+ /* don't shut up pins when unloading the driver; otherwise it breaks
+ * the default pin setup at the next load of the driver
+ */
+ if (codec->bus->shutdown)
+ return;
+ for (i = 0; i < codec->init_pins.used; i++) {
+ struct hda_pincfg *pin = snd_array_elem(&codec->init_pins, i);
+ /* use read here for syncing after issuing each verb */
+ if (pin->nid != mic_pin)
+ snd_hda_codec_read(codec, pin->nid, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
+ }
+ codec->pins_shutup = 1;
+}
+
static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up)
{
int val = alc_read_coef_idx(codec, 0x04);
@@ -4072,7 +4093,7 @@ static unsigned int alc_power_filter_xps13(struct hda_codec *codec,
/* Avoid pop noises when headphones are plugged in */
if (spec->gen.hp_jack_present)
- if (nid == codec->afg || nid == 0x02)
+ if (nid == codec->afg || nid == 0x02 || nid == 0x15)
return AC_PWRST_D0;
return power_state;
}
@@ -4082,8 +4103,19 @@ static void alc_fixup_dell_xps13(struct hda_codec *codec,
{
if (action == HDA_FIXUP_ACT_PROBE) {
struct alc_spec *spec = codec->spec;
+ struct hda_input_mux *imux = &spec->gen.input_mux;
+ int i;
+
spec->shutup = alc_no_shutup;
codec->power_filter = alc_power_filter_xps13;
+
+ /* Make the internal mic the default input source. */
+ for (i = 0; i < imux->num_items; i++) {
+ if (spec->gen.imux_pins[i] == 0x12) {
+ spec->gen.cur_mux[0] = i;
+ break;
+ }
+ }
}
}
@@ -5384,6 +5416,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0286:
case 0x10ec0288:
spec->codec_variant = ALC269_TYPE_ALC286;
+ spec->shutup = alc286_shutup;
break;
case 0x10ec0255:
spec->codec_variant = ALC269_TYPE_ALC255;
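In the Dell XPS 13 fixup above, the default capture source is switched to the internal mic by walking the parsed input mux: each mux item i corresponds to the pin recorded in spec->gen.imux_pins[i], so finding pin 0x12 and storing its index in cur_mux[0] preselects that input on the first ADC mux. The loop from the hunk, with that relationship spelled out:

	/* imux item i <-> source pin spec->gen.imux_pins[i];
	 * cur_mux[0] is the currently selected item for the first ADC mux
	 */
	for (i = 0; i < imux->num_items; i++) {
		if (spec->gen.imux_pins[i] == 0x12) {	/* internal mic pin */
			spec->gen.cur_mux[0] = i;
			break;
		}
	}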
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index f652b10ce905..223c47b33ba3 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -1581,6 +1581,35 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
{
+ /* BOSS ME-25 */
+ USB_DEVICE(0x0582, 0x0113),
+ .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = & (const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+{
/* only 44.1 kHz works at the moment */
USB_DEVICE(0x0582, 0x0120),
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {