author     Rusty Russell <rusty@rustcorp.com.au>  2013-07-03 10:06:29 +0930
committer  Rusty Russell <rusty@rustcorp.com.au>  2013-07-03 10:15:10 +0930
commit     9eb76d7797b892a1dad4f2efb6f786681306dd13 (patch)
tree       ef41bc026c52fdaca74920deafa8801e1e9b9df0 /kernel/module.c
parent     8d8022e8aba85192e937f1f0f7450e256d66ae5c (diff)
module: cleanup call chain.
Fold alloc_module_percpu into percpu_modalloc().

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'kernel/module.c')
-rw-r--r--  kernel/module.c  35
1 file changed, 16 insertions, 19 deletions
diff --git a/kernel/module.c b/kernel/module.c
index d1a161be7b04..c18107942ac2 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -482,23 +482,28 @@ static inline void __percpu *mod_percpu(struct module *mod)
         return mod->percpu;
 }
 
-static int percpu_modalloc(struct module *mod,
-                           unsigned long size, unsigned long align)
+static int percpu_modalloc(struct module *mod, struct load_info *info)
 {
+        Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
+        unsigned long align = pcpusec->sh_addralign;
+
+        if (!pcpusec->sh_size)
+                return 0;
+
         if (align > PAGE_SIZE) {
                 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
                        mod->name, align, PAGE_SIZE);
                 align = PAGE_SIZE;
         }
 
-        mod->percpu = __alloc_reserved_percpu(size, align);
+        mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
         if (!mod->percpu) {
                 printk(KERN_WARNING
                        "%s: Could not allocate %lu bytes percpu data\n",
-                       mod->name, size);
+                       mod->name, (unsigned long)pcpusec->sh_size);
                 return -ENOMEM;
         }
-        mod->percpu_size = size;
+        mod->percpu_size = pcpusec->sh_size;
         return 0;
 }
@@ -563,10 +568,12 @@ static inline void __percpu *mod_percpu(struct module *mod)
 {
         return NULL;
 }
-static inline int percpu_modalloc(struct module *mod,
-                                  unsigned long size, unsigned long align)
+static int percpu_modalloc(struct module *mod, struct load_info *info)
 {
-        return -ENOMEM;
+        /* UP modules shouldn't have this section: ENOMEM isn't quite right */
+        if (info->sechdrs[info->index.pcpu].sh_size != 0)
+                return -ENOMEM;
+        return 0;
 }
 static inline void percpu_modfree(struct module *mod)
 {
@@ -2976,16 +2983,6 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
         return mod;
 }
 
-static int alloc_module_percpu(struct module *mod, struct load_info *info)
-{
-        Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
-        if (!pcpusec->sh_size)
-                return 0;
-
-        /* We have a special allocation for this section. */
-        return percpu_modalloc(mod, pcpusec->sh_size, pcpusec->sh_addralign);
-}
-
 /* mod is no longer valid after this! */
 static void module_deallocate(struct module *mod, struct load_info *info)
 {
@@ -3260,7 +3257,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
 #endif
 
         /* To avoid stressing percpu allocator, do this once we're unique. */
-        err = alloc_module_percpu(mod, info);
+        err = percpu_modalloc(mod, info);
         if (err)
                 goto unlink_mod;
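
For reference, a sketch of how the SMP version of percpu_modalloc() reads once this patch is applied, reconstructed from the hunks above (not built standalone here; types such as struct load_info and Elf_Shdr come from the surrounding kernel sources):

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
        Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
        unsigned long align = pcpusec->sh_addralign;

        /* Nothing to allocate if the module has no per-cpu section. */
        if (!pcpusec->sh_size)
                return 0;

        /* Clamp oversized alignment requests to a page. */
        if (align > PAGE_SIZE) {
                printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
                       mod->name, align, PAGE_SIZE);
                align = PAGE_SIZE;
        }

        mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
        if (!mod->percpu) {
                printk(KERN_WARNING
                       "%s: Could not allocate %lu bytes percpu data\n",
                       mod->name, (unsigned long)pcpusec->sh_size);
                return -ENOMEM;
        }
        mod->percpu_size = pcpusec->sh_size;
        return 0;
}

The old alloc_module_percpu() wrapper is removed entirely: load_module() now calls percpu_modalloc(mod, info) directly, and the empty-section check, size and alignment handling all live in one place.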