author    Borislav Petkov <borislav.petkov@amd.com>    2010-04-22 16:07:01 +0200
committer H. Peter Anvin <hpa@zytor.com>                2010-04-22 17:17:23 -0700
commit    ba06edb63f5ef2913aad37070eaec3c9d8ac73b8 (patch)
tree      79605f9dfc645a6be5045e1fb18201befe27fef3 /arch/x86/kernel/cpu/intel_cacheinfo.c
parent    9350f982e4fe539e83a2d4a13e9b53ad8253c4a8 (diff)
x86, cacheinfo: Make L3 cache info per node
Currently, we're allocating L3 cache info and calculating indices for
each online cpu, which is clearly superfluous. Instead, we need to do
this once per node, since there is one L3 cache per node.

No functional change, only per-cpu memory savings.

-v2: Allocate the L3 cache descriptors array dynamically.

Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
LKML-Reference: <1271945222-5283-5-git-send-email-bp@amd64.org>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
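For readers skimming the patch below, here is the core idea as a minimal,
self-contained user-space sketch, not kernel code: keep one lazily
allocated descriptor array indexed by node, so the first CPU on a node
allocates the descriptor and every later CPU on that node reuses it.
struct l3_desc, get_l3_for_node(), num_nodes, and the 0x3ff placeholder
are hypothetical stand-ins for the patch's struct amd_l3_cache,
amd_check_l3_disable(), num_k8_northbridges, and the real index
calculation; calloc() stands in for kzalloc(..., GFP_ATOMIC).

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-in for struct amd_l3_cache. */
    struct l3_desc {
            unsigned int indices;
    };

    /* One slot per node, allocated lazily on first use. */
    static struct l3_desc **l3_caches;

    static struct l3_desc *get_l3_for_node(int node, int num_nodes)
    {
            if (!l3_caches) {
                    /* calloc() zero-fills, like the kernel's kzalloc(). */
                    l3_caches = calloc(num_nodes, sizeof(*l3_caches));
                    if (!l3_caches)
                            return NULL;
            }

            /* First CPU on this node allocates; later CPUs reuse it. */
            if (!l3_caches[node]) {
                    l3_caches[node] = calloc(1, sizeof(**l3_caches));
                    if (l3_caches[node])
                            l3_caches[node]->indices = 0x3ff; /* placeholder */
            }

            return l3_caches[node];
    }

    int main(void)
    {
            /* Two CPUs that map to node 0 share one descriptor. */
            struct l3_desc *a = get_l3_for_node(0, 2);
            struct l3_desc *b = get_l3_for_node(0, 2);

            printf("shared descriptor: %s\n", a && a == b ? "yes" : "no");
            return 0;
    }

Called once per online CPU, as the kernel does during cacheinfo setup,
this allocates num_nodes descriptors instead of one per CPU, which is
exactly the per-cpu memory saving the changelog claims.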
Diffstat (limited to 'arch/x86/kernel/cpu/intel_cacheinfo.c')
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 59
1 file changed, 45 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index ff663ca63fd..1346e9c23fc 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -307,19 +307,18 @@ struct _cache_attr {
};
#ifdef CONFIG_CPU_SUP_AMD
+
+/*
+ * L3 cache descriptors
+ */
+static struct amd_l3_cache **__cpuinitdata l3_caches;
+
static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
{
- /*
- * We're called over smp_call_function_single() and therefore
- * are on the correct cpu.
- */
- int cpu = smp_processor_id();
- int node = cpu_to_node(cpu);
- struct pci_dev *dev = node_to_k8_nb_misc(node);
unsigned int sc0, sc1, sc2, sc3;
u32 val = 0;
- pci_read_config_dword(dev, 0x1C4, &val);
+ pci_read_config_dword(l3->dev, 0x1C4, &val);
/* calculate subcache sizes */
l3->subcaches[0] = sc0 = !(val & BIT(0));
@@ -328,13 +327,31 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
+}
+
+static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
+{
+ struct amd_l3_cache *l3;
+ struct pci_dev *dev = node_to_k8_nb_misc(node);
+
+ l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
+ if (!l3) {
+ printk(KERN_WARNING "Error allocating L3 struct\n");
+ return NULL;
+ }
l3->dev = dev;
+
+ amd_calc_l3_indices(l3);
+
+ return l3;
}
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
+ int node;
+
if (boot_cpu_data.x86 != 0x10)
return;
@@ -355,14 +372,28 @@ amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
if (num_k8_northbridges == 0)
return;
- this_leaf->l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
- if (!this_leaf->l3) {
- printk(KERN_WARNING "Error allocating L3 struct\n");
- return;
+ /*
+ * Strictly speaking, the amount in @size below is leaked since it is
+ * never freed but this is done only on shutdown so it doesn't matter.
+ */
+ if (!l3_caches) {
+ int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);
+
+ l3_caches = kzalloc(size, GFP_ATOMIC);
+ if (!l3_caches)
+ return;
}
- this_leaf->l3->can_disable = true;
- amd_calc_l3_indices(this_leaf->l3);
+ node = amd_get_nb_id(smp_processor_id());
+
+ if (!l3_caches[node]) {
+ l3_caches[node] = amd_init_l3_cache(node);
+ l3_caches[node]->can_disable = true;
+ }
+
+ WARN_ON(!l3_caches[node]);
+
+ this_leaf->l3 = l3_caches[node];
}
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,