Diffstat (limited to 'arch/m32r')
-rw-r--r--  arch/m32r/Kconfig                 |  8
-rw-r--r--  arch/m32r/Kconfig.debug           |  9
-rw-r--r--  arch/m32r/include/asm/mmzone.h    |  8
-rw-r--r--  arch/m32r/include/asm/smp.h       |  6
-rw-r--r--  arch/m32r/include/asm/unistd.h    |  3
-rw-r--r--  arch/m32r/kernel/smp.c            | 68
-rw-r--r--  arch/m32r/kernel/smpboot.c        | 48
-rw-r--r--  arch/m32r/kernel/syscall_table.S  |  1
-rw-r--r--  arch/m32r/kernel/vmlinux.lds.S    |  3
-rw-r--r--  arch/m32r/mm/discontig.c          |  1
-rw-r--r--  arch/m32r/mm/init.c               |  2
11 files changed, 61 insertions, 96 deletions
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index 736b808d229..85b44e85822 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -256,14 +256,6 @@ config ARCH_HAS_ILOG2_U64
bool
default n
-config GENERIC_FIND_NEXT_BIT
- bool
- default y
-
-config GENERIC_FIND_BIT_LE
- bool
- default y
-
config GENERIC_HWEIGHT
bool
default y
diff --git a/arch/m32r/Kconfig.debug b/arch/m32r/Kconfig.debug
index 2e1019ddbb2..bb1afc1a31c 100644
--- a/arch/m32r/Kconfig.debug
+++ b/arch/m32r/Kconfig.debug
@@ -9,15 +9,6 @@ config DEBUG_STACKOVERFLOW
This option will cause messages to be printed if free stack space
drops below a certain limit.
-config DEBUG_STACK_USAGE
- bool "Stack utilization instrumentation"
- depends on DEBUG_KERNEL
- help
- Enables the display of the minimum amount of free stack which each
- task has ever had available in the sysrq-T and sysrq-P debug output.
-
- This option will slow down process creation somewhat.
-
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
depends on DEBUG_KERNEL && BROKEN
diff --git a/arch/m32r/include/asm/mmzone.h b/arch/m32r/include/asm/mmzone.h
index 9f3b5accda8..115ced33feb 100644
--- a/arch/m32r/include/asm/mmzone.h
+++ b/arch/m32r/include/asm/mmzone.h
@@ -14,12 +14,6 @@ extern struct pglist_data *node_data[];
#define NODE_DATA(nid) (node_data[nid])
#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
-#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
-#define node_end_pfn(nid) \
-({ \
- pg_data_t *__pgdat = NODE_DATA(nid); \
- __pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1; \
-})
#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
/*
@@ -44,7 +38,7 @@ static __inline__ int pfn_to_nid(unsigned long pfn)
int node;
for (node = 0 ; node < MAX_NUMNODES ; node++)
- if (pfn >= node_start_pfn(node) && pfn <= node_end_pfn(node))
+ if (pfn >= node_start_pfn(node) && pfn < node_end_pfn(node))
break;
return node;
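
The one-character change above is the whole point of this hunk: m32r's private node_end_pfn() returned the last valid pfn of a node, while the generic helper it now relies on returns one past it (node_start_pfn + node_spanned_pages), so the containment test must tighten from <= to <. A minimal standalone sketch of the half-open convention, with invented node layouts:

    #include <stdio.h>

    #define MAX_NUMNODES 2

    /* Invented layout: node 0 covers pfns [0x0000, 0x1000),
     * node 1 covers [0x1000, 0x2000). */
    static unsigned long node_start[MAX_NUMNODES]   = { 0x0000, 0x1000 };
    static unsigned long node_spanned[MAX_NUMNODES] = { 0x1000, 0x1000 };

    static unsigned long node_start_pfn(int nid) { return node_start[nid]; }

    static unsigned long node_end_pfn(int nid)   /* one PAST the last pfn */
    {
            return node_start[nid] + node_spanned[nid];
    }

    static int pfn_to_nid(unsigned long pfn)
    {
            int node;

            for (node = 0; node < MAX_NUMNODES; node++)
                    if (pfn >= node_start_pfn(node) && pfn < node_end_pfn(node))
                            break;
            return node;
    }

    int main(void)
    {
            /* With the old '<=' test, pfn 0x1000 would match node 0 too. */
            printf("pfn 0x0fff -> node %d\n", pfn_to_nid(0x0fff)); /* 0 */
            printf("pfn 0x1000 -> node %d\n", pfn_to_nid(0x1000)); /* 1 */
            return 0;
    }
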
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h
index e67ded1aab9..cf7829a6155 100644
--- a/arch/m32r/include/asm/smp.h
+++ b/arch/m32r/include/asm/smp.h
@@ -81,11 +81,11 @@ static __inline__ int cpu_number_map(int cpu)
static __inline__ unsigned int num_booting_cpus(void)
{
- return cpus_weight(cpu_callout_map);
+ return cpumask_weight(&cpu_callout_map);
}
extern void smp_send_timer(void);
-extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
+extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -94,8 +94,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
#define NO_PROC_ID (0xff) /* No processor magic marker */
-#define PROC_CHANGE_PENALTY (15) /* Schedule penalty */
-
/*
* M32R-mp IPI
*/
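
Both hunks in this header follow one pattern: the old cpumask API passed cpumask_t by value and operated on it with cpus_*() macros, while the replacement passes const pointers through cpumask_*() accessors. The motivation is size: cpumask_t wraps a bitmap of NR_CPUS bits, which gets too large to copy on every call once NR_CPUS grows. A standalone sketch of the idea; this is a simplified model, not the real <linux/cpumask.h>, and the 512-byte figure assumes a 64-bit host:

    #include <stdio.h>
    #include <string.h>

    #define NR_CPUS 4096
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    typedef struct {
            unsigned long bits[NR_CPUS / BITS_PER_LONG];
    } cpumask_t;                     /* 512 bytes at NR_CPUS = 4096 */

    /* New-style accessor: takes a pointer, so no big copy per call. */
    static unsigned int cpumask_weight(const cpumask_t *mask)
    {
            unsigned int i, w = 0;

            for (i = 0; i < NR_CPUS / BITS_PER_LONG; i++)
                    w += __builtin_popcountl(mask->bits[i]);
            return w;
    }

    static void cpumask_set_cpu(unsigned int cpu, cpumask_t *mask)
    {
            mask->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
    }

    int main(void)
    {
            cpumask_t online;

            memset(&online, 0, sizeof(online));
            cpumask_set_cpu(0, &online);
            cpumask_set_cpu(2, &online);
            printf("%u cpus set\n", cpumask_weight(&online)); /* 2 */
            return 0;
    }
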
diff --git a/arch/m32r/include/asm/unistd.h b/arch/m32r/include/asm/unistd.h
index c70545689da..3e1db561aac 100644
--- a/arch/m32r/include/asm/unistd.h
+++ b/arch/m32r/include/asm/unistd.h
@@ -330,10 +330,11 @@
/* #define __NR_timerfd 322 removed */
#define __NR_eventfd 323
#define __NR_fallocate 324
+#define __NR_setns 325
#ifdef __KERNEL__
-#define NR_syscalls 325
+#define NR_syscalls 326
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_STAT64
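
NR_syscalls moves in lockstep with the new entry: syscall numbers start at 0, so adding __NR_setns as 325 makes the table 326 entries long. From userspace the call is reached through the setns(2) wrapper (glibc 2.14 and later; older libcs need syscall(__NR_setns, ...)). A usage sketch; the namespace path is only illustrative and the call needs appropriate privileges:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* /proc/1/ns/net is just an example namespace handle. */
            int fd = open("/proc/1/ns/net", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (setns(fd, 0) < 0) {  /* nstype 0: accept any namespace */
                    perror("setns");
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }
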
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index 31cef20b299..092d40a6708 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -30,6 +30,7 @@
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>
+#include <asm/tlbflush.h>
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Data structures and variables */
@@ -61,33 +62,22 @@ extern spinlock_t ipi_lock[];
/* Function Prototypes */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-void smp_send_reschedule(int);
void smp_reschedule_interrupt(void);
-
-void smp_flush_cache_all(void);
void smp_flush_cache_all_interrupt(void);
-void smp_flush_tlb_all(void);
static void flush_tlb_all_ipi(void *);
-
-void smp_flush_tlb_mm(struct mm_struct *);
-void smp_flush_tlb_range(struct vm_area_struct *, unsigned long, \
- unsigned long);
-void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
static void flush_tlb_others(cpumask_t, struct mm_struct *,
struct vm_area_struct *, unsigned long);
+
void smp_invalidate_interrupt(void);
-void smp_send_stop(void);
static void stop_this_cpu(void *);
-void smp_send_timer(void);
void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);
static void send_IPI_allbutself(int, int);
static void send_IPI_mask(const struct cpumask *, int, int);
-unsigned long send_IPI_mask_phys(cpumask_t, int, int);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Rescheduling request Routines */
@@ -122,8 +112,6 @@ void smp_send_reschedule(int cpu_id)
*
* Description: This routine executes on CPU which received
* 'RESCHEDULE_IPI'.
- * Rescheduling is processed at the exit of interrupt
- * operation.
*
* Born on Date: 2002.02.05
*
@@ -138,7 +126,7 @@ void smp_send_reschedule(int cpu_id)
*==========================================================================*/
void smp_reschedule_interrupt(void)
{
- /* nothing to do */
+ scheduler_ipi();
}
/*==========================================================================*
@@ -164,10 +152,10 @@ void smp_flush_cache_all(void)
unsigned long *mask;
preempt_disable();
- cpumask = cpu_online_map;
- cpu_clear(smp_processor_id(), cpumask);
+ cpumask_copy(&cpumask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &cpumask);
spin_lock(&flushcache_lock);
- mask=cpus_addr(cpumask);
+ mask=cpumask_bits(&cpumask);
atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
_flush_cache_copyback_all();
@@ -265,8 +253,8 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
preempt_disable();
cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id];
- cpu_mask = *mm_cpumask(mm);
- cpu_clear(cpu_id, cpu_mask);
+ cpumask_copy(&cpu_mask, mm_cpumask(mm));
+ cpumask_clear_cpu(cpu_id, &cpu_mask);
if (*mmc != NO_CONTEXT) {
local_irq_save(flags);
@@ -277,7 +265,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
local_irq_restore(flags);
}
- if (!cpus_empty(cpu_mask))
+ if (!cpumask_empty(&cpu_mask))
flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
preempt_enable();
@@ -335,8 +323,8 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
preempt_disable();
cpu_id = smp_processor_id();
mmc = &mm->context[cpu_id];
- cpu_mask = *mm_cpumask(mm);
- cpu_clear(cpu_id, cpu_mask);
+ cpumask_copy(&cpu_mask, mm_cpumask(mm));
+ cpumask_clear_cpu(cpu_id, &cpu_mask);
#ifdef DEBUG_SMP
if (!mm)
@@ -350,7 +338,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
__flush_tlb_page(va);
local_irq_restore(flags);
}
- if (!cpus_empty(cpu_mask))
+ if (!cpumask_empty(&cpu_mask))
flush_tlb_others(cpu_mask, mm, vma, va);
preempt_enable();
@@ -397,14 +385,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
* - current CPU must not be in mask
* - mask must exist :)
*/
- BUG_ON(cpus_empty(cpumask));
+ BUG_ON(cpumask_empty(&cpumask));
- BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+ BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
BUG_ON(!mm);
/* If a CPU which we ran on has gone down, OK. */
- cpus_and(cpumask, cpumask, cpu_online_map);
- if (cpus_empty(cpumask))
+ cpumask_and(&cpumask, &cpumask, cpu_online_mask);
+ if (cpumask_empty(&cpumask))
return;
/*
@@ -418,7 +406,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
flush_mm = mm;
flush_vma = vma;
flush_va = va;
- mask=cpus_addr(cpumask);
+ mask=cpumask_bits(&cpumask);
atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
/*
@@ -427,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
*/
send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
- while (!cpus_empty(flush_cpumask)) {
+ while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
/* nothing. lockup detection does not belong here */
mb();
}
@@ -462,7 +450,7 @@ void smp_invalidate_interrupt(void)
int cpu_id = smp_processor_id();
unsigned long *mmc = &flush_mm->context[cpu_id];
- if (!cpu_isset(cpu_id, flush_cpumask))
+ if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
return;
if (flush_va == FLUSH_ALL) {
@@ -480,7 +468,7 @@ void smp_invalidate_interrupt(void)
__flush_tlb_page(va);
}
}
- cpu_clear(cpu_id, flush_cpumask);
+ cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -532,7 +520,7 @@ static void stop_this_cpu(void *dummy)
/*
* Remove this CPU:
*/
- cpu_clear(cpu_id, cpu_online_map);
+ set_cpu_online(cpu_id, false);
/*
* PSW IE = 1;
@@ -727,8 +715,8 @@ static void send_IPI_allbutself(int ipi_num, int try)
{
cpumask_t cpumask;
- cpumask = cpu_online_map;
- cpu_clear(smp_processor_id(), cpumask);
+ cpumask_copy(&cpumask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &cpumask);
send_IPI_mask(&cpumask, ipi_num, try);
}
@@ -765,13 +753,13 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
cpumask_and(&tmp, cpumask, cpu_online_mask);
BUG_ON(!cpumask_equal(cpumask, &tmp));
- physid_mask = CPU_MASK_NONE;
+ cpumask_clear(&physid_mask);
for_each_cpu(cpu_id, cpumask) {
if ((phys_id = cpu_to_physid(cpu_id)) != -1)
- cpu_set(phys_id, physid_mask);
+ cpumask_set_cpu(phys_id, &physid_mask);
}
- send_IPI_mask_phys(physid_mask, ipi_num, try);
+ send_IPI_mask_phys(&physid_mask, ipi_num, try);
}
/*==========================================================================*
@@ -794,14 +782,14 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
* ---------- --- --------------------------------------------------------
*
*==========================================================================*/
-unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
+unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
int try)
{
spinlock_t *ipilock;
volatile unsigned long *ipicr_addr;
unsigned long ipicr_val;
unsigned long my_physid_mask;
- unsigned long mask = cpus_addr(physid_mask)[0];
+ unsigned long mask = cpumask_bits(physid_mask)[0];
if (mask & ~physids_coerce(phys_cpu_present_map))
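
Beyond the mechanical cpumask conversions, flush_tlb_others() and smp_invalidate_interrupt() above form a publish-and-ack protocol: the initiator ORs the target CPUs into flush_cpumask, sends INVALIDATE_TLB_IPI, and spins until every target has cleared its own bit. A standalone pthreads model of the same pattern, with C11 atomics standing in for atomic_set_mask() and the mb() barrier (compile with -pthread; everything here is illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define NTARGETS 3

    static atomic_ulong flush_mask;

    static void *target(void *arg)
    {
            unsigned long bit = 1UL << (long)arg;

            /* ... a real handler would flush its TLB here ... */
            atomic_fetch_and(&flush_mask, ~bit);  /* ack: clear our bit */
            return NULL;
    }

    int main(void)
    {
            pthread_t t[NTARGETS];
            long i;

            /* Publish: mark every target pending, then "send the IPI". */
            atomic_store(&flush_mask, (1UL << NTARGETS) - 1);
            for (i = 0; i < NTARGETS; i++)
                    pthread_create(&t[i], NULL, target, (void *)i);

            while (atomic_load(&flush_mask) != 0)
                    ;                             /* spin until all acks */
            printf("all targets acked\n");

            for (i = 0; i < NTARGETS; i++)
                    pthread_join(t[i], NULL);
            return 0;
    }
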
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index e034844cfc0..cfdbe5d1500 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -135,9 +135,9 @@ void __devinit smp_prepare_boot_cpu(void)
{
bsp_phys_id = hard_smp_processor_id();
physid_set(bsp_phys_id, phys_cpu_present_map);
- cpu_set(0, cpu_online_map); /* BSP's cpu_id == 0 */
- cpu_set(0, cpu_callout_map);
- cpu_set(0, cpu_callin_map);
+ set_cpu_online(0, true); /* BSP's cpu_id == 0 */
+ cpumask_set_cpu(0, &cpu_callout_map);
+ cpumask_set_cpu(0, &cpu_callin_map);
/*
* Initialize the logical to physical CPU number mapping
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
physid_set(phys_id, phys_cpu_present_map);
#ifndef CONFIG_HOTPLUG_CPU
- init_cpu_present(&cpu_possible_map);
+ init_cpu_present(cpu_possible_mask);
#endif
show_mp_info(nr_cpu);
@@ -294,10 +294,10 @@ static void __init do_boot_cpu(int phys_id)
send_status = 0;
boot_status = 0;
- cpu_set(phys_id, cpu_bootout_map);
+ cpumask_set_cpu(phys_id, &cpu_bootout_map);
/* Send Startup IPI */
- send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0);
+ send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);
Dprintk("Waiting for send to finish...\n");
timeout = 0;
@@ -306,7 +306,7 @@ static void __init do_boot_cpu(int phys_id)
do {
Dprintk("+");
udelay(1000);
- send_status = !cpu_isset(phys_id, cpu_bootin_map);
+ send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
} while (send_status && (timeout++ < 100));
Dprintk("After Startup.\n");
@@ -316,19 +316,19 @@ static void __init do_boot_cpu(int phys_id)
* allow APs to start initializing.
*/
Dprintk("Before Callout %d.\n", cpu_id);
- cpu_set(cpu_id, cpu_callout_map);
+ cpumask_set_cpu(cpu_id, &cpu_callout_map);
Dprintk("After Callout %d.\n", cpu_id);
/*
* Wait 5s total for a response
*/
for (timeout = 0; timeout < 5000; timeout++) {
- if (cpu_isset(cpu_id, cpu_callin_map))
+ if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
break; /* It has booted */
udelay(1000);
}
- if (cpu_isset(cpu_id, cpu_callin_map)) {
+ if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
/* number CPUs logically, starting from 1 (BSP is 0) */
Dprintk("OK.\n");
} else {
@@ -340,9 +340,9 @@ static void __init do_boot_cpu(int phys_id)
if (send_status || boot_status) {
unmap_cpu_to_physid(cpu_id, phys_id);
- cpu_clear(cpu_id, cpu_callout_map);
- cpu_clear(cpu_id, cpu_callin_map);
- cpu_clear(cpu_id, cpu_initialized);
+ cpumask_clear_cpu(cpu_id, &cpu_callout_map);
+ cpumask_clear_cpu(cpu_id, &cpu_callin_map);
+ cpumask_clear_cpu(cpu_id, &cpu_initialized);
cpucount--;
}
}
@@ -351,17 +351,17 @@ int __cpuinit __cpu_up(unsigned int cpu_id)
{
int timeout;
- cpu_set(cpu_id, smp_commenced_mask);
+ cpumask_set_cpu(cpu_id, &smp_commenced_mask);
/*
* Wait 5s total for a response
*/
for (timeout = 0; timeout < 5000; timeout++) {
- if (cpu_isset(cpu_id, cpu_online_map))
+ if (cpu_online(cpu_id))
break;
udelay(1000);
}
- if (!cpu_isset(cpu_id, cpu_online_map))
+ if (!cpu_online(cpu_id))
BUG();
return 0;
@@ -373,11 +373,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
unsigned long bogosum = 0;
for (timeout = 0; timeout < 5000; timeout++) {
- if (cpus_equal(cpu_callin_map, cpu_online_map))
+ if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
break;
udelay(1000);
}
- if (!cpus_equal(cpu_callin_map, cpu_online_map))
+ if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
BUG();
for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
@@ -388,7 +388,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
*/
Dprintk("Before bogomips.\n");
if (cpucount) {
- for_each_cpu_mask(cpu_id, cpu_online_map)
+ for_each_cpu(cpu_id,cpu_online_mask)
bogosum += cpu_data[cpu_id].loops_per_jiffy;
printk(KERN_INFO "Total of %d processors activated " \
@@ -425,7 +425,7 @@ int __init start_secondary(void *unused)
cpu_init();
preempt_disable();
smp_callin();
- while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+ while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
cpu_relax();
smp_online();
@@ -463,7 +463,7 @@ static void __init smp_callin(void)
int cpu_id = smp_processor_id();
unsigned long timeout;
- if (cpu_isset(cpu_id, cpu_callin_map)) {
+ if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
printk("huh, phys CPU#%d, CPU#%d already present??\n",
phys_id, cpu_id);
BUG();
@@ -474,7 +474,7 @@ static void __init smp_callin(void)
timeout = jiffies + (2 * HZ);
while (time_before(jiffies, timeout)) {
/* Has the boot CPU finished it's STARTUP sequence ? */
- if (cpu_isset(cpu_id, cpu_callout_map))
+ if (cpumask_test_cpu(cpu_id, &cpu_callout_map))
break;
cpu_relax();
}
@@ -486,7 +486,7 @@ static void __init smp_callin(void)
}
/* Allow the master to continue. */
- cpu_set(cpu_id, cpu_callin_map);
+ cpumask_set_cpu(cpu_id, &cpu_callin_map);
}
static void __init smp_online(void)
@@ -503,7 +503,7 @@ static void __init smp_online(void)
/* Save our processor parameters */
smp_store_cpu_info(cpu_id);
- cpu_set(cpu_id, cpu_online_map);
+ set_cpu_online(cpu_id, true);
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
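
do_boot_cpu() and smp_callin() above are the two ends of a callout/callin handshake: the master marks the AP in cpu_callout_map and polls cpu_callin_map on a roughly 5-second budget, while the AP waits for its callout bit before reporting in. A standalone model with two atomic flags; the timeout mirrors the diff, everything else is invented:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int callout, callin;

    static void *secondary(void *unused)
    {
            (void)unused;
            while (!atomic_load(&callout))
                    ;                    /* wait for the master's go-ahead */
            /* ... per-CPU init would happen here ... */
            atomic_store(&callin, 1);    /* report in */
            return NULL;
    }

    int main(void)
    {
            pthread_t ap;
            int timeout;

            pthread_create(&ap, NULL, secondary, NULL);
            atomic_store(&callout, 1);   /* "callout": let the AP proceed */

            for (timeout = 0; timeout < 5000; timeout++) {  /* ~5 s */
                    if (atomic_load(&callin))
                            break;
                    usleep(1000);
            }
            printf(atomic_load(&callin) ? "AP booted\n" : "AP timed out\n");
            pthread_join(ap, NULL);
            return 0;
    }
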
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
index 60536e27123..528f2e6ad06 100644
--- a/arch/m32r/kernel/syscall_table.S
+++ b/arch/m32r/kernel/syscall_table.S
@@ -324,3 +324,4 @@ ENTRY(sys_call_table)
.long sys_ni_syscall
.long sys_eventfd
.long sys_fallocate
+ .long sys_setns /* 325 */
diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S
index c194d64cdbb..018e4a711d7 100644
--- a/arch/m32r/kernel/vmlinux.lds.S
+++ b/arch/m32r/kernel/vmlinux.lds.S
@@ -44,6 +44,7 @@ SECTIONS
EXCEPTION_TABLE(16)
NOTES
+ _sdata = .; /* Start of data section */
RODATA
RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
_edata = .; /* End of data section */
@@ -53,7 +54,7 @@ SECTIONS
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
INIT_DATA_SECTION(16)
- PERCPU(32, PAGE_SIZE)
+ PERCPU_SECTION(32)
. = ALIGN(PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
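
The new _sdata symbol gives the kernel a named lower bound for the data section to pair with the existing _edata, and PERCPU_SECTION(32) is the successor spelling of the per-cpu output section, which no longer takes an explicit alignment argument. Linker-provided boundary symbols like these are consumed by declaring them extern and taking their addresses; userspace ELF binaries on GNU/Linux get the analogous etext/edata/end symbols from the default linker script (see end(3)), which makes the idiom easy to demonstrate:

    #include <stdio.h>

    /* Provided by the default linker script, not defined in any .c file;
     * the same mechanism as the kernel's _sdata/_edata. GNU/Linux-specific. */
    extern char etext, edata, end;

    int main(void)
    {
            printf("text ends at %p\n", (void *)&etext);
            printf("data ends at %p\n", (void *)&edata);
            printf("bss  ends at %p\n", (void *)&end);
            return 0;
    }
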
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index 5d2858f6eed..2c468e8b585 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -149,6 +149,7 @@ unsigned long __init zone_sizes_init(void)
zholes_size[ZONE_DMA] = mp->holes;
holes += zholes_size[ZONE_DMA];
+ node_set_state(nid, N_NORMAL_MEMORY);
free_area_init_node(nid, zones_size, start_pfn, zholes_size);
}
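
The node_set_state() call matters because parts of the generic memory-management code iterate nodes by state mask rather than by the online map, so a node never marked N_NORMAL_MEMORY is invisible to them. A simplified standalone model of the node-state masks and their iterator; the real definitions live in include/linux/nodemask.h:

    #include <stdio.h>

    enum node_states { N_ONLINE, N_NORMAL_MEMORY, NR_NODE_STATES };

    #define MAX_NUMNODES 8

    static unsigned long node_states[NR_NODE_STATES];

    static void node_set_state(int nid, enum node_states st)
    {
            node_states[st] |= 1UL << nid;
    }

    #define for_each_node_state(nid, st)                    \
            for ((nid) = 0; (nid) < MAX_NUMNODES; (nid)++)  \
                    if (node_states[(st)] & (1UL << (nid)))

    int main(void)
    {
            int nid;

            node_set_state(0, N_NORMAL_MEMORY);
            node_set_state(1, N_NORMAL_MEMORY);
            for_each_node_state(nid, N_NORMAL_MEMORY)
                    printf("node %d has normal memory\n", nid);
            return 0;
    }
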
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index 73e2205ebf5..78b660e903d 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -35,8 +35,6 @@ extern char __init_begin, __init_end;
pgd_t swapper_pg_dir[1024];
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
/*
* Cache of MMU context last used.
*/