Diffstat (limited to 'drivers/edac')
-rw-r--r--  drivers/edac/amd64_edac.c      214
-rw-r--r--  drivers/edac/amd64_edac.h       12
-rw-r--r--  drivers/edac/edac_mc.c           6
-rw-r--r--  drivers/edac/edac_pci_sysfs.c    2
-rw-r--r--  drivers/edac/mce_amd.c         166
-rw-r--r--  drivers/edac/mce_amd.h          13
-rw-r--r--  drivers/edac/mpc85xx_edac.c      4
7 files changed, 184 insertions, 233 deletions
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ad8bf2aa629..910b0116c12 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -31,7 +31,7 @@ static struct ecc_settings **ecc_stngs;
*
*FIXME: Produce a better mapping/linearisation.
*/
-struct scrubrate {
+static const struct scrubrate {
u32 scrubval; /* bit pattern for scrub rate */
u32 bandwidth; /* bandwidth consumed (bytes/sec) */
} scrubrates[] = {
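The table above pairs each hardware scrub-control bit pattern with the bandwidth it consumes; making it static const keeps it in read-only data and out of the global namespace. Setting a rate amounts to scanning the table for a close match. A minimal sketch of such a lookup, assuming entries are ordered fastest to slowest with a catch-all last entry (the driver's actual set-rate loop is outside this hunk, and pick_scrubval/new_bw are illustrative names):

/* Hedged sketch: choose the scrubval whose bandwidth does not exceed
 * the requested rate, falling back to the slowest (last) entry. */
static u32 pick_scrubval(const struct scrubrate *tab, int n, u32 new_bw)
{
	int i;

	for (i = 0; i < n - 1; i++)
		if (tab[i].bandwidth <= new_bw)
			break;

	return tab[i].scrubval;
}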
@@ -239,7 +239,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
* DRAM base/limit associated with node_id
*/
static bool amd64_base_limit_match(struct amd64_pvt *pvt, u64 sys_addr,
- unsigned nid)
+ u8 nid)
{
u64 addr;
@@ -265,7 +265,7 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
u64 sys_addr)
{
struct amd64_pvt *pvt;
- unsigned node_id;
+ u8 node_id;
u32 intlv_en, bits;
/*
@@ -602,111 +602,6 @@ static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
return input_addr;
}
-
-/*
- * @input_addr is an InputAddr associated with the node represented by mci.
- * Translate @input_addr to a DramAddr and return the result.
- */
-static u64 input_addr_to_dram_addr(struct mem_ctl_info *mci, u64 input_addr)
-{
- struct amd64_pvt *pvt;
- unsigned node_id, intlv_shift;
- u64 bits, dram_addr;
- u32 intlv_sel;
-
- /*
- * Near the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
- * shows how to translate a DramAddr to an InputAddr. Here we reverse
- * this procedure. When translating from a DramAddr to an InputAddr, the
- * bits used for node interleaving are discarded. Here we recover these
- * bits from the IntlvSel field of the DRAM Limit register (section
- * 3.4.4.2) for the node that input_addr is associated with.
- */
- pvt = mci->pvt_info;
- node_id = pvt->mc_node_id;
-
- BUG_ON(node_id > 7);
-
- intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
- if (intlv_shift == 0) {
- edac_dbg(1, " InputAddr 0x%lx translates to DramAddr of same value\n",
- (unsigned long)input_addr);
-
- return input_addr;
- }
-
- bits = ((input_addr & GENMASK(12, 35)) << intlv_shift) +
- (input_addr & 0xfff);
-
- intlv_sel = dram_intlv_sel(pvt, node_id) & ((1 << intlv_shift) - 1);
- dram_addr = bits + (intlv_sel << 12);
-
- edac_dbg(1, "InputAddr 0x%lx translates to DramAddr 0x%lx (%d node interleave bits)\n",
- (unsigned long)input_addr,
- (unsigned long)dram_addr, intlv_shift);
-
- return dram_addr;
-}
-
-/*
- * @dram_addr is a DramAddr that maps to the node represented by mci. Convert
- * @dram_addr to a SysAddr.
- */
-static u64 dram_addr_to_sys_addr(struct mem_ctl_info *mci, u64 dram_addr)
-{
- struct amd64_pvt *pvt = mci->pvt_info;
- u64 hole_base, hole_offset, hole_size, base, sys_addr;
- int ret = 0;
-
- ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
- &hole_size);
- if (!ret) {
- if ((dram_addr >= hole_base) &&
- (dram_addr < (hole_base + hole_size))) {
- sys_addr = dram_addr + hole_offset;
-
- edac_dbg(1, "using DHAR to translate DramAddr 0x%lx to SysAddr 0x%lx\n",
- (unsigned long)dram_addr,
- (unsigned long)sys_addr);
-
- return sys_addr;
- }
- }
-
- base = get_dram_base(pvt, pvt->mc_node_id);
- sys_addr = dram_addr + base;
-
- /*
- * The sys_addr we have computed up to this point is a 40-bit value
- * because the k8 deals with 40-bit values. However, the value we are
- * supposed to return is a full 64-bit physical address. The AMD
- * x86-64 architecture specifies that the most significant implemented
- * address bit through bit 63 of a physical address must be either all
- * 0s or all 1s. Therefore we sign-extend the 40-bit sys_addr to a
- * 64-bit value below. See section 3.4.2 of AMD publication 24592:
- * AMD x86-64 Architecture Programmer's Manual Volume 1 Application
- * Programming.
- */
- sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
-
- edac_dbg(1, " Node %d, DramAddr 0x%lx to SysAddr 0x%lx\n",
- pvt->mc_node_id, (unsigned long)dram_addr,
- (unsigned long)sys_addr);
-
- return sys_addr;
-}
-
-/*
- * @input_addr is an InputAddr associated with the node given by mci. Translate
- * @input_addr to a SysAddr.
- */
-static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
- u64 input_addr)
-{
- return dram_addr_to_sys_addr(mci,
- input_addr_to_dram_addr(mci, input_addr));
-}
-
/* Map the Error address to a PAGE and PAGE OFFSET. */
static inline void error_address_to_page_and_offset(u64 error_address,
struct err_info *err)
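The deleted dram_addr_to_sys_addr() above carried the one non-obvious step in the whole translation chain: widening a 40-bit K8 address to the architecturally required 64-bit form. A worked example of that sign-extension idiom:

/* Worked example of the idiom from the removed code:
 *	sys_addr |= ~((sys_addr & (1ull << 39)) - 1);
 *
 * Bit 39 clear: (0 - 1) is all-ones, its complement is 0, OR is a no-op.
 * Bit 39 set:   (1ull << 39) - 1 == 0x0000007fffffffff, complemented to
 *               0xffffff8000000000, so bits 63:39 all become 1.
 */
u64 sys_addr = 0x8000000000ULL;			/* bit 39 set */
sys_addr |= ~((sys_addr & (1ULL << 39)) - 1);
/* sys_addr == 0xffffff8000000000ULL */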
@@ -939,7 +834,8 @@ static u64 get_error_address(struct mce *m)
struct amd64_pvt *pvt;
u64 cc6_base, tmp_addr;
u32 tmp;
- u8 mce_nid, intlv_en;
+ u16 mce_nid;
+ u8 intlv_en;
if ((addr & GENMASK(24, 47)) >> 24 != 0x00fdf7)
return addr;
@@ -979,10 +875,29 @@ static u64 get_error_address(struct mce *m)
return addr;
}
+static struct pci_dev *pci_get_related_function(unsigned int vendor,
+ unsigned int device,
+ struct pci_dev *related)
+{
+ struct pci_dev *dev = NULL;
+
+ while ((dev = pci_get_device(vendor, device, dev))) {
+ if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
+ (dev->bus->number == related->bus->number) &&
+ (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
+ break;
+ }
+
+ return dev;
+}
+
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
+ struct amd_northbridge *nb;
+ struct pci_dev *misc, *f1 = NULL;
struct cpuinfo_x86 *c = &boot_cpu_data;
int off = range << 3;
+ u32 llim;
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
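Compared with the copy it replaces further down, the reintroduced pci_get_related_function() adds a pci_domain_nr() test, so a device on a different PCI domain with a matching bus and slot can no longer be picked up by mistake. A usage sketch mirroring the caller below; pci_get_device() hands back a counted reference, so the result must be released with pci_dev_put():

struct pci_dev *f1;

/* misc is the node's F3 device; look up the F1 sibling that shares
 * its domain, bus and slot. */
f1 = pci_get_related_function(misc->vendor,
			      PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
if (f1) {
	/* ... read F1 registers ... */
	pci_dev_put(f1);
}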
@@ -996,30 +911,32 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
- /* Factor in CC6 save area by reading dst node's limit reg */
- if (c->x86 == 0x15) {
- struct pci_dev *f1 = NULL;
- u8 nid = dram_dst_node(pvt, range);
- u32 llim;
+ /* F15h: factor in CC6 save area by reading dst node's limit reg */
+ if (c->x86 != 0x15)
+ return;
- f1 = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x18 + nid, 1));
- if (WARN_ON(!f1))
- return;
+ nb = node_to_amd_nb(dram_dst_node(pvt, range));
+ if (WARN_ON(!nb))
+ return;
+
+ misc = nb->misc;
+ f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
+ if (WARN_ON(!f1))
+ return;
- amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
+ amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
- pvt->ranges[range].lim.lo &= GENMASK(0, 15);
+ pvt->ranges[range].lim.lo &= GENMASK(0, 15);
- /* {[39:27],111b} */
- pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
+ /* {[39:27],111b} */
+ pvt->ranges[range].lim.lo |= ((llim & 0x1fff) << 3 | 0x7) << 16;
- pvt->ranges[range].lim.hi &= GENMASK(0, 7);
+ pvt->ranges[range].lim.hi &= GENMASK(0, 7);
- /* [47:40] */
- pvt->ranges[range].lim.hi |= llim >> 13;
+ /* [47:40] */
+ pvt->ranges[range].lim.hi |= llim >> 13;
- pci_dev_put(f1);
- }
+ pci_dev_put(f1);
}
static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
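The rewritten tail of read_dram_base_limit_regs() flattens the old nested block into early returns and rebuilds the range limit from the destination node's DRAM_LOCAL_NODE_LIM register. A worked example of the {[39:27],111b} packing, with an illustrative llim value (note this driver's local GENMASK(lo, hi) takes the low bit first, so GENMASK(0, 15) is 0xffff):

/* llim's low 13 bits hold DramLimit[39:27]; bits [20:13] hold
 * DramLimit[47:40]. The value below is illustrative. */
u32 llim = 0x1abc;
u32 lo16 = ((llim & 0x1fff) << 3) | 0x7;	/* {limit[39:27],111b} */
/* lim.lo[31:16] <- lo16  (address bits 39:24, with 26:24 forced to 1)
 * lim.hi[7:0]   <- llim >> 13  (address bits 47:40) */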
@@ -1305,7 +1222,7 @@ static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
}
/* Convert the sys_addr to the normalized DCT address */
-static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, unsigned range,
+static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
u64 sys_addr, bool hi_rng,
u32 dct_sel_base_addr)
{
@@ -1381,7 +1298,7 @@ static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
* -EINVAL: NOT FOUND
* 0..csrow = Chip-Select Row
*/
-static int f1x_lookup_addr_in_dct(u64 in_addr, u32 nid, u8 dct)
+static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
@@ -1672,23 +1589,6 @@ static struct amd64_family_type amd64_family_types[] = {
},
};
-static struct pci_dev *pci_get_related_function(unsigned int vendor,
- unsigned int device,
- struct pci_dev *related)
-{
- struct pci_dev *dev = NULL;
-
- dev = pci_get_device(vendor, device, dev);
- while (dev) {
- if ((dev->bus->number == related->bus->number) &&
- (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
- break;
- dev = pci_get_device(vendor, device, dev);
- }
-
- return dev;
-}
-
/*
* These are tables of eigenvectors (one per line) which can be used for the
* construction of the syndrome tables. The modified syndrome search algorithm
@@ -1696,7 +1596,7 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
*
* Algorithm courtesy of Ross LaFetra from AMD.
*/
-static u16 x4_vectors[] = {
+static const u16 x4_vectors[] = {
0x2f57, 0x1afe, 0x66cc, 0xdd88,
0x11eb, 0x3396, 0x7f4c, 0xeac8,
0x0001, 0x0002, 0x0004, 0x0008,
@@ -1735,7 +1635,7 @@ static u16 x4_vectors[] = {
0x19a9, 0x2efe, 0xb5cc, 0x6f88,
};
-static u16 x8_vectors[] = {
+static const u16 x8_vectors[] = {
0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
@@ -1757,7 +1657,7 @@ static u16 x8_vectors[] = {
0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
};
-static int decode_syndrome(u16 syndrome, u16 *vectors, unsigned num_vecs,
+static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
unsigned v_dim)
{
unsigned int i, err_sym;
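The vectors arrays are per-symbol blocks of syndrome basis rows over GF(2): an error confined to one data symbol yields a syndrome that is some XOR combination of that symbol's v_dim rows. The driver's decode_syndrome() locates the symbol with an elimination walk; the exhaustive equivalent below shows the underlying idea (a sketch of the principle, not the driver's algorithm):

/* Hedged sketch: report which symbol's row-span contains the syndrome
 * by trying every XOR combination (at most 2^8 - 1 per symbol). */
static int find_err_sym(u16 syndrome, const u16 *vectors,
			unsigned num_vecs, unsigned v_dim)
{
	unsigned sym, comb, i;

	for (sym = 0; sym < num_vecs / v_dim; sym++) {
		for (comb = 1; comb < (1u << v_dim); comb++) {
			u16 s = 0;

			for (i = 0; i < v_dim; i++)
				if (comb & (1u << i))
					s ^= vectors[sym * v_dim + i];

			if (s == syndrome)
				return sym;	/* error symbol found */
		}
	}

	return -1;				/* unknown syndrome */
}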
@@ -2181,7 +2081,7 @@ static int init_csrows(struct mem_ctl_info *mci)
}
/* get all cores on this DCT */
-static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
+static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
{
int cpu;
@@ -2191,7 +2091,7 @@ static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, unsigned nid)
}
/* check MCG_CTL on all the cpus on this node */
-static bool amd64_nb_mce_bank_enabled_on_node(unsigned nid)
+static bool amd64_nb_mce_bank_enabled_on_node(u16 nid)
{
cpumask_var_t mask;
int cpu, nbe;
@@ -2224,7 +2124,7 @@ out:
return ret;
}
-static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
+static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
{
cpumask_var_t cmask;
int cpu;
@@ -2262,7 +2162,7 @@ static int toggle_ecc_err_reporting(struct ecc_settings *s, u8 nid, bool on)
return 0;
}
-static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
struct pci_dev *F3)
{
bool ret = true;
@@ -2314,7 +2214,7 @@ static bool enable_ecc_error_reporting(struct ecc_settings *s, u8 nid,
return ret;
}
-static void restore_ecc_error_reporting(struct ecc_settings *s, u8 nid,
+static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
struct pci_dev *F3)
{
u32 value, mask = 0x3; /* UECC/CECC enable */
@@ -2353,7 +2253,7 @@ static const char *ecc_msg =
"'ecc_enable_override'.\n"
" (Note that use of the override may cause unknown side effects.)\n";
-static bool ecc_enabled(struct pci_dev *F3, u8 nid)
+static bool ecc_enabled(struct pci_dev *F3, u16 nid)
{
u32 value;
u8 ecc_en = 0;
@@ -2474,7 +2374,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
int err = 0, ret;
- u8 nid = get_node_id(F2);
+ u16 nid = amd_get_node_id(F2);
ret = -ENOMEM;
pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
@@ -2566,7 +2466,7 @@ err_ret:
static int amd64_probe_one_instance(struct pci_dev *pdev,
const struct pci_device_id *mc_type)
{
- u8 nid = get_node_id(pdev);
+ u16 nid = amd_get_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s;
int ret = 0;
@@ -2616,7 +2516,7 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
{
struct mem_ctl_info *mci;
struct amd64_pvt *pvt;
- u8 nid = get_node_id(pdev);
+ u16 nid = amd_get_node_id(pdev);
struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
struct ecc_settings *s = ecc_stngs[nid];
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index e864f407806..35637d83f23 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -292,12 +292,6 @@
/* MSRs */
#define MSR_MCGCTL_NBE BIT(4)
-/* AMD sets the first MC device at device ID 0x18. */
-static inline u8 get_node_id(struct pci_dev *pdev)
-{
- return PCI_SLOT(pdev->devfn) - 0x18;
-}
-
enum amd_families {
K8_CPUS = 0,
F10_CPUS,
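The dropped helper encoded the long-standing convention that node N's northbridge functions sit at PCI device 0x18 + N; amd_get_node_id() from the shared amd_nb code takes over, and the u8-to-u16 widening throughout the driver lifts the 8-bit node-id ceiling. A worked example of the mapping the removed inline performed:

/* Node 1's F3 device sits at device 0x18 + 1: */
unsigned int devfn = PCI_DEVFN(0x18 + 1, 3);	/* device 0x19, function 3 */
u8 nid = PCI_SLOT(devfn) - 0x18;		/* == 1 */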
@@ -340,7 +334,7 @@ struct amd64_pvt {
/* pci_device handles which we utilize */
struct pci_dev *F1, *F2, *F3;
- unsigned mc_node_id; /* MC index of this MC node */
+ u16 mc_node_id; /* MC index of this MC node */
int ext_model; /* extended model value of this node */
int channel_count;
@@ -393,7 +387,7 @@ struct err_info {
u32 offset;
};
-static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
+static inline u64 get_dram_base(struct amd64_pvt *pvt, u8 i)
{
u64 addr = ((u64)pvt->ranges[i].base.lo & 0xffff0000) << 8;
@@ -403,7 +397,7 @@ static inline u64 get_dram_base(struct amd64_pvt *pvt, unsigned i)
return (((u64)pvt->ranges[i].base.hi & 0x000000ff) << 40) | addr;
}
-static inline u64 get_dram_limit(struct amd64_pvt *pvt, unsigned i)
+static inline u64 get_dram_limit(struct amd64_pvt *pvt, u8 i)
{
u64 lim = (((u64)pvt->ranges[i].lim.lo & 0xffff0000) << 8) | 0x00ffffff;
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 281f566a551..d1e9eb191f2 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -340,7 +340,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
/*
* Allocate and fill the csrow/channels structs
*/
- mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL);
+ mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
goto error;
for (row = 0; row < tot_csrows; row++) {
@@ -351,7 +351,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
csr->csrow_idx = row;
csr->mci = mci;
csr->nr_channels = tot_channels;
- csr->channels = kcalloc(sizeof(*csr->channels), tot_channels,
+ csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
GFP_KERNEL);
if (!csr->channels)
goto error;
@@ -369,7 +369,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
/*
* Allocate and fill the dimm structs
*/
- mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL);
+ mci->dimms = kcalloc(tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
if (!mci->dimms)
goto error;
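All three hunks fix transposed kcalloc() arguments: the signature is kcalloc(n, size, flags). The product, and hence the allocation, is the same either way, so this is about honoring the documented contract and keeping static checkers quiet rather than changing behavior. The corrected idiom:

/* kcalloc(n, size, flags): zeroed array of n elements, with overflow
 * checking on the n * size product. */
mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
if (!mci->csrows)
	goto error;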
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index dc6e905ee1a..0056c4dae9d 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -256,7 +256,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
struct edac_pci_dev_attribute *edac_pci_dev;
edac_pci_dev = (struct edac_pci_dev_attribute *)attr;
- if (edac_pci_dev->show)
+ if (edac_pci_dev->store)
return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
return -EIO;
}
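A copy-paste fix: the write path gated the call to ->store() on the presence of ->show(), so a write-only attribute wrongly returned -EIO while writing an attribute that defines show but not store would have called a NULL function pointer. The corrected dispatch checks the callback it is about to invoke:

if (edac_pci_dev->store)
	return edac_pci_dev->store(edac_pci_dev->value, buffer, count);
return -EIO;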
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index ad637572d8c..f3f0c930d55 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -39,30 +39,28 @@ EXPORT_SYMBOL_GPL(amd_unregister_ecc_decoder);
*/
/* transaction type */
-const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
-EXPORT_SYMBOL_GPL(tt_msgs);
+static const char * const tt_msgs[] = { "INSN", "DATA", "GEN", "RESV" };
/* cache level */
-const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
-EXPORT_SYMBOL_GPL(ll_msgs);
+static const char * const ll_msgs[] = { "RESV", "L1", "L2", "L3/GEN" };
/* memory transaction type */
-const char * const rrrr_msgs[] = {
+static const char * const rrrr_msgs[] = {
"GEN", "RD", "WR", "DRD", "DWR", "IRD", "PRF", "EV", "SNP"
};
-EXPORT_SYMBOL_GPL(rrrr_msgs);
/* participating processor */
const char * const pp_msgs[] = { "SRC", "RES", "OBS", "GEN" };
EXPORT_SYMBOL_GPL(pp_msgs);
/* request timeout */
-const char * const to_msgs[] = { "no timeout", "timed out" };
-EXPORT_SYMBOL_GPL(to_msgs);
+static const char * const to_msgs[] = { "no timeout", "timed out" };
/* memory or i/o */
-const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
-EXPORT_SYMBOL_GPL(ii_msgs);
+static const char * const ii_msgs[] = { "MEM", "RESV", "IO", "GEN" };
+
+/* internal error type */
+static const char * const uu_msgs[] = { "RESV", "RESV", "HWA", "RESV" };
static const char * const f15h_mc1_mce_desc[] = {
"UC during a demand linefill from L2",
@@ -176,7 +174,7 @@ static bool k8_mc0_mce(u16 ec, u8 xec)
return f10h_mc0_mce(ec, xec);
}
-static bool f14h_mc0_mce(u16 ec, u8 xec)
+static bool cat_mc0_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
bool ret = true;
@@ -330,22 +328,28 @@ static bool k8_mc1_mce(u16 ec, u8 xec)
return ret;
}
-static bool f14h_mc1_mce(u16 ec, u8 xec)
+static bool cat_mc1_mce(u16 ec, u8 xec)
{
u8 r4 = R4(ec);
bool ret = true;
- if (MEM_ERROR(ec)) {
- if (TT(ec) != 0 || LL(ec) != 1)
- ret = false;
+ if (!MEM_ERROR(ec))
+ return false;
+
+ if (TT(ec) != TT_INSTR)
+ return false;
+
+ if (r4 == R4_IRD)
+ pr_cont("Data/tag array parity error for a tag hit.\n");
+ else if (r4 == R4_SNOOP)
+ pr_cont("Tag error during snoop/victimization.\n");
+ else if (xec == 0x0)
+ pr_cont("Tag parity error from victim castout.\n");
+ else if (xec == 0x2)
+ pr_cont("Microcode patch RAM parity error.\n");
+ else
+ ret = false;
- if (r4 == R4_IRD)
- pr_cont("Data/tag array parity error for a tag hit.\n");
- else if (r4 == R4_SNOOP)
- pr_cont("Tag error during snoop/victimization.\n");
- else
- ret = false;
- }
return ret;
}
@@ -399,12 +403,9 @@ static void decode_mc1_mce(struct mce *m)
pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n");
}
-static void decode_mc2_mce(struct mce *m)
+static bool k8_mc2_mce(u16 ec, u8 xec)
{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, xec_mask);
-
- pr_emerg(HW_ERR "MC2 Error");
+ bool ret = true;
if (xec == 0x1)
pr_cont(" in the write data buffers.\n");
@@ -429,24 +430,18 @@ static void decode_mc2_mce(struct mce *m)
pr_cont(": %s parity/ECC error during data "
"access from L2.\n", R4_MSG(ec));
else
- goto wrong_mc2_mce;
+ ret = false;
} else
- goto wrong_mc2_mce;
+ ret = false;
} else
- goto wrong_mc2_mce;
-
- return;
+ ret = false;
- wrong_mc2_mce:
- pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n");
+ return ret;
}
-static void decode_f15_mc2_mce(struct mce *m)
+static bool f15h_mc2_mce(u16 ec, u8 xec)
{
- u16 ec = EC(m->status);
- u8 xec = XEC(m->status, xec_mask);
-
- pr_emerg(HW_ERR "MC2 Error: ");
+ bool ret = true;
if (TLB_ERROR(ec)) {
if (xec == 0x0)
@@ -454,10 +449,10 @@ static void decode_f15_mc2_mce(struct mce *m)
else if (xec == 0x1)
pr_cont("Poison data provided for TLB fill.\n");
else
- goto wrong_f15_mc2_mce;
+ ret = false;
} else if (BUS_ERROR(ec)) {
if (xec > 2)
- goto wrong_f15_mc2_mce;
+ ret = false;
pr_cont("Error during attempted NB data read.\n");
} else if (MEM_ERROR(ec)) {
@@ -471,14 +466,63 @@ static void decode_f15_mc2_mce(struct mce *m)
break;
default:
- goto wrong_f15_mc2_mce;
+ ret = false;
}
}
- return;
+ return ret;
+}
- wrong_f15_mc2_mce:
- pr_emerg(HW_ERR "Corrupted MC2 MCE info?\n");
+static bool f16h_mc2_mce(u16 ec, u8 xec)
+{
+ u8 r4 = R4(ec);
+
+ if (!MEM_ERROR(ec))
+ return false;
+
+ switch (xec) {
+ case 0x04 ... 0x05:
+ pr_cont("%cBUFF parity error.\n", (r4 == R4_RD) ? 'I' : 'O');
+ break;
+
+ case 0x09 ... 0x0b:
+ case 0x0d ... 0x0f:
+ pr_cont("ECC error in L2 tag (%s).\n",
+ ((r4 == R4_GEN) ? "BankReq" :
+ ((r4 == R4_SNOOP) ? "Prb" : "Fill")));
+ break;
+
+ case 0x10 ... 0x19:
+ case 0x1b:
+ pr_cont("ECC error in L2 data array (%s).\n",
+ (((r4 == R4_RD) && !(xec & 0x3)) ? "Hit" :
+ ((r4 == R4_GEN) ? "Attr" :
+ ((r4 == R4_EVICT) ? "Vict" : "Fill"))));
+ break;
+
+ case 0x1c ... 0x1d:
+ case 0x1f:
+ pr_cont("Parity error in L2 attribute bits (%s).\n",
+ ((r4 == R4_RD) ? "Hit" :
+ ((r4 == R4_GEN) ? "Attr" : "Fill")));
+ break;
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static void decode_mc2_mce(struct mce *m)
+{
+ u16 ec = EC(m->status);
+ u8 xec = XEC(m->status, xec_mask);
+
+ pr_emerg(HW_ERR "MC2 Error: ");
+
+ if (!fam_ops->mc2_mce(ec, xec))
+ pr_cont(HW_ERR "Corrupted MC2 MCE info?\n");
}
static void decode_mc3_mce(struct mce *m)
@@ -547,7 +591,7 @@ static void decode_mc4_mce(struct mce *m)
return;
case 0x19:
- if (boot_cpu_data.x86 == 0x15)
+ if (boot_cpu_data.x86 == 0x15 || boot_cpu_data.x86 == 0x16)
pr_cont("Compute Unit Data Error.\n");
else
goto wrong_mc4_mce;
@@ -633,6 +677,10 @@ static void decode_mc6_mce(struct mce *m)
static inline void amd_decode_err_code(u16 ec)
{
+ if (INT_ERROR(ec)) {
+ pr_emerg(HW_ERR "internal: %s\n", UU_MSG(ec));
+ return;
+ }
pr_emerg(HW_ERR "cache level: %s", LL_MSG(ec));
@@ -702,10 +750,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
break;
case 2:
- if (c->x86 == 0x15)
- decode_f15_mc2_mce(m);
- else
- decode_mc2_mce(m);
+ decode_mc2_mce(m);
break;
case 3:
@@ -740,7 +785,7 @@ int amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data)
((m->status & MCI_STATUS_PCC) ? "PCC" : "-"),
((m->status & MCI_STATUS_ADDRV) ? "AddrV" : "-"));
- if (c->x86 == 0x15)
+ if (c->x86 == 0x15 || c->x86 == 0x16)
pr_cont("|%s|%s",
((m->status & MCI_STATUS_DEFERRED) ? "Deferred" : "-"),
((m->status & MCI_STATUS_POISON) ? "Poison" : "-"));
@@ -772,7 +817,7 @@ static int __init mce_amd_init(void)
if (c->x86_vendor != X86_VENDOR_AMD)
return 0;
- if (c->x86 < 0xf || c->x86 > 0x15)
+ if (c->x86 < 0xf || c->x86 > 0x16)
return 0;
fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
@@ -783,33 +828,46 @@ static int __init mce_amd_init(void)
case 0xf:
fam_ops->mc0_mce = k8_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x10:
fam_ops->mc0_mce = f10h_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x11:
fam_ops->mc0_mce = k8_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x12:
fam_ops->mc0_mce = f12h_mc0_mce;
fam_ops->mc1_mce = k8_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x14:
nb_err_cpumask = 0x3;
- fam_ops->mc0_mce = f14h_mc0_mce;
- fam_ops->mc1_mce = f14h_mc1_mce;
+ fam_ops->mc0_mce = cat_mc0_mce;
+ fam_ops->mc1_mce = cat_mc1_mce;
+ fam_ops->mc2_mce = k8_mc2_mce;
break;
case 0x15:
xec_mask = 0x1f;
fam_ops->mc0_mce = f15h_mc0_mce;
fam_ops->mc1_mce = f15h_mc1_mce;
+ fam_ops->mc2_mce = f15h_mc2_mce;
+ break;
+
+ case 0x16:
+ xec_mask = 0x1f;
+ fam_ops->mc0_mce = cat_mc0_mce;
+ fam_ops->mc1_mce = cat_mc1_mce;
+ fam_ops->mc2_mce = f16h_mc2_mce;
break;
default:
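With mc2_mce added to amd_decoder_ops, every cache-bank decoder is chosen once at init time and bank dispatch needs no family checks at decode time (see the simplified case 2 above). A hedged sketch of how a further family would slot into this switch; the family number and hook choices here are purely hypothetical:

case 0x17:				/* hypothetical future family */
	xec_mask = 0x1f;		/* illustrative */
	fam_ops->mc0_mce = cat_mc0_mce;
	fam_ops->mc1_mce = cat_mc1_mce;
	fam_ops->mc2_mce = f16h_mc2_mce;
	break;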
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index 679679951e2..51b7e3a36e3 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -14,6 +14,7 @@
#define TLB_ERROR(x) (((x) & 0xFFF0) == 0x0010)
#define MEM_ERROR(x) (((x) & 0xFF00) == 0x0100)
#define BUS_ERROR(x) (((x) & 0xF800) == 0x0800)
+#define INT_ERROR(x) (((x) & 0xF4FF) == 0x0400)
#define TT(x) (((x) >> 2) & 0x3)
#define TT_MSG(x) tt_msgs[TT(x)]
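INT_ERROR joins the existing TLB/MEM/BUS classifiers: the 0xF4FF mask pins every bit of the error code except the UU field (bits 9:8) and bit 11, requiring bit 10 set while bits 15:12 and 7:0 are clear. A worked example:

u16 ec = 0x0600;			/* illustrative error code */
/* (0x0600 & 0xF4FF) == 0x0400, so INT_ERROR(ec) is true;
 * UU(ec) == (0x0600 >> 8) & 0x3 == 2, so UU_MSG(ec) is "HWA". */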
@@ -25,6 +26,8 @@
#define TO_MSG(x) to_msgs[TO(x)]
#define PP(x) (((x) >> 9) & 0x3)
#define PP_MSG(x) pp_msgs[PP(x)]
+#define UU(x) (((x) >> 8) & 0x3)
+#define UU_MSG(x) uu_msgs[UU(x)]
#define R4(x) (((x) >> 4) & 0xf)
#define R4_MSG(x) ((R4(x) < 9) ? rrrr_msgs[R4(x)] : "Wrong R4!")
@@ -32,6 +35,8 @@
#define MCI_STATUS_DEFERRED BIT_64(44)
#define MCI_STATUS_POISON BIT_64(43)
+extern const char * const pp_msgs[];
+
enum tt_ids {
TT_INSTR = 0,
TT_DATA,
@@ -65,19 +70,13 @@ enum rrrr_ids {
R4_SNOOP,
};
-extern const char * const tt_msgs[];
-extern const char * const ll_msgs[];
-extern const char * const rrrr_msgs[];
-extern const char * const pp_msgs[];
-extern const char * const to_msgs[];
-extern const char * const ii_msgs[];
-
/*
* per-family decoder ops
*/
struct amd_decoder_ops {
bool (*mc0_mce)(u16, u8);
bool (*mc1_mce)(u16, u8);
+ bool (*mc2_mce)(u16, u8);
};
void amd_report_gart_errors(bool);
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 42a840d530a..3eb32f62d72 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -301,7 +301,7 @@ int mpc85xx_pci_err_probe(struct platform_device *op)
"[EDAC] PCI err", pci);
if (res < 0) {
printk(KERN_ERR
- "%s: Unable to requiest irq %d for "
+ "%s: Unable to request irq %d for "
"MPC85xx PCI err\n", __func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;
@@ -583,7 +583,7 @@ static int mpc85xx_l2_err_probe(struct platform_device *op)
"[EDAC] L2 err", edac_dev);
if (res < 0) {
printk(KERN_ERR
- "%s: Unable to requiest irq %d for "
+ "%s: Unable to request irq %d for "
"MPC85xx L2 err\n", __func__, pdata->irq);
irq_dispose_mapping(pdata->irq);
res = -ENODEV;