Diffstat (limited to 'arch/parisc/kernel')
-rw-r--r--  arch/parisc/kernel/cache.c       | 166
-rw-r--r--  arch/parisc/kernel/head.S        |   4
-rw-r--r--  arch/parisc/kernel/inventory.c   |   1
-rw-r--r--  arch/parisc/kernel/sys_parisc.c  |  25
-rw-r--r--  arch/parisc/kernel/traps.c       |   6
5 files changed, 99 insertions, 103 deletions
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 65fb4cbc3a0..ac87a40502e 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -71,18 +71,27 @@ flush_cache_all_local(void)
}
EXPORT_SYMBOL(flush_cache_all_local);
+/* Virtual address of pfn. */
+#define pfn_va(pfn) __va(PFN_PHYS(pfn))
+
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
- struct page *page = pte_page(*ptep);
+ unsigned long pfn = pte_pfn(*ptep);
+ struct page *page;
- if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
- test_bit(PG_dcache_dirty, &page->flags)) {
+ /* We don't have pte special. As a result, we can be called with
+ an invalid pfn and we don't need to flush the kernel dcache page.
+ This occurs with FireGL card in C8000. */
+ if (!pfn_valid(pfn))
+ return;
- flush_kernel_dcache_page(page);
+ page = pfn_to_page(pfn);
+ if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
clear_bit(PG_dcache_dirty, &page->flags);
} else if (parisc_requires_coherency())
- flush_kernel_dcache_page(page);
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
}
void
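[Note on the hunk above: update_mmu_cache() now extracts the pfn from the pte and validates it with pfn_valid() before any struct page is formed. Without pte special, a pte can carry a frame number with no struct page behind it (the comment cites the FireGL card in a C8000), so the old pte_page()-first ordering consulted mem_map for memory that was never part of it. The new pfn_va() helper simply names the direct-mapped kernel virtual address of a frame; a one-line sketch of what it expands to, using the existing PFN_PHYS() and __va() kernel macros:

    void *va = __va(PFN_PHYS(pfn));   /* pfn_va(pfn): kernel alias of frame pfn */

With that address in hand, the flush can use flush_kernel_dcache_page_addr() directly instead of the struct-page-based flush_kernel_dcache_page().]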
@@ -379,41 +388,20 @@ void flush_kernel_dcache_page_addr(void *addr)
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
-void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
-{
- clear_page_asm(vto);
- if (!parisc_requires_coherency())
- flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(clear_user_page);
-
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg)
{
- /* Copy using kernel mapping. No coherency is needed
- (all in kmap/kunmap) on machines that don't support
- non-equivalent aliasing. However, the `from' page
- needs to be flushed before it can be accessed through
- the kernel mapping. */
+ /* Copy using kernel mapping. No coherency is needed (all in
+ kunmap) for the `to' page. However, the `from' page needs to
+ be flushed through a mapping equivalent to the user mapping
+ before it can be accessed through the kernel mapping. */
preempt_disable();
flush_dcache_page_asm(__pa(vfrom), vaddr);
preempt_enable();
copy_page_asm(vto, vfrom);
- if (!parisc_requires_coherency())
- flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);
-#ifdef CONFIG_PA8X00
-
-void kunmap_parisc(void *addr)
-{
- if (parisc_requires_coherency())
- flush_kernel_dcache_page_addr(addr);
-}
-EXPORT_SYMBOL(kunmap_parisc);
-#endif
-
void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
unsigned long flags;
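[Note on the hunk above: two related simplifications land here. First, the arch-private clear_user_page() and the CONFIG_PA8X00 kunmap_parisc() override disappear; per the rewritten comment, coherency for the destination page is now handled at kunmap time. Second, copy_user_page() keeps only the flush that is still required: the `from' page must be written back through a mapping congruent with the user mapping before the kernel alias reads it, which flush_dcache_page_asm(__pa(vfrom), vaddr) does by pairing the physical address with the user virtual address. A hedged sketch of the congruence rule on a VIPT dcache (aliases_equivalently() is a hypothetical helper, not a kernel API; SHMLBA is the architecture's alias granularity):

    #include <asm/shmparam.h>   /* SHMLBA */

    /* Two virtual mappings of one physical page only stay coherent
     * without flushing if they share a colour, i.e. the same offset
     * within SHMLBA. */
    static int aliases_equivalently(unsigned long va1, unsigned long va2)
    {
            return ((va1 ^ va2) & (SHMLBA - 1)) == 0;
    }
]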
@@ -440,8 +428,8 @@ void __flush_tlb_range(unsigned long sid, unsigned long start,
else {
unsigned long flags;
- mtsp(sid, 1);
purge_tlb_start(flags);
+ mtsp(sid, 1);
if (split_tlb) {
while (npages--) {
pdtlb(start);
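[Note on the reorder above: mtsp(sid, 1) now executes only after purge_tlb_start() has taken the TLB lock and disabled interrupts. A plausible reading (an assumption; the hunk itself gives no rationale) is that %sr1 is scratch state an interrupt handler may reload, so setting it before entering the protected region left a window in which the purge could target the wrong space:

    /* Assumed race being closed (illustrative timeline):
     *
     *   mtsp(sid, 1);              %sr1 = sid
     *      <interrupt fires; handler reloads %sr1>
     *   purge_tlb_start(flags);    too late, %sr1 already clobbered
     *   pdtlb(start);              purge lands in the wrong space
     *
     * With mtsp() inside the irq-off, locked region, "set %sr1 then
     * purge" becomes atomic with respect to interrupts.
     */
]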
@@ -495,44 +483,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
void flush_cache_mm(struct mm_struct *mm)
{
+ struct vm_area_struct *vma;
+ pgd_t *pgd;
+
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
- if (mm_total_size(mm) < parisc_cache_flush_threshold) {
- struct vm_area_struct *vma;
-
- if (mm->context == mfsp(3)) {
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- flush_user_dcache_range_asm(vma->vm_start,
- vma->vm_end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(
- vma->vm_start, vma->vm_end);
- }
- } else {
- pgd_t *pgd = mm->pgd;
-
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- unsigned long addr;
-
- for (addr = vma->vm_start; addr < vma->vm_end;
- addr += PAGE_SIZE) {
- pte_t *ptep = get_ptep(pgd, addr);
- if (ptep != NULL) {
- pte_t pte = *ptep;
- __flush_cache_page(vma, addr,
- page_to_phys(pte_page(pte)));
- }
- }
- }
+ if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ flush_cache_all();
+ return;
+ }
+
+ if (mm->context == mfsp(3)) {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+ if ((vma->vm_flags & VM_EXEC) == 0)
+ continue;
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
}
return;
}
-#ifdef CONFIG_SMP
- flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ pgd = mm->pgd;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end;
+ addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (!pfn_valid(pfn))
+ continue;
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ }
+ }
}
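[Note on the hunk above: rewritten with guard clauses, flush_cache_mm() now reads top to bottom: flush the whole cache when the mm exceeds parisc_cache_flush_threshold, use the fast user-range flushes when the mm is the one live in space register 3 (which is what mm->context == mfsp(3) tests), and otherwise walk the page tables. Two behavioural changes ride along: the old CONFIG_SMP distinction between flush_cache_all() and flush_cache_all_local() collapses to flush_cache_all(), and the walk now skips ptes whose frame fails pfn_valid(). A hedged per-page helper equivalent to the new walk body (flush_one_user_page() is a hypothetical name, not in the patch):

    static void flush_one_user_page(struct vm_area_struct *vma,
                                    pgd_t *pgd, unsigned long addr)
    {
            pte_t *ptep = get_ptep(pgd, addr);

            if (!ptep || !pfn_valid(pte_pfn(*ptep)))
                    return;   /* not mapped, or no struct page behind it */
            /* page_to_phys(pte_page(*ptep)) equals PFN_PHYS(pte_pfn(*ptep))
             * for a valid pfn, but only the PFN_PHYS() form is safe to
             * compute when the pfn is bogus, which is exactly what the
             * new test filters out. */
            __flush_cache_page(vma, addr, PFN_PHYS(pte_pfn(*ptep)));
    }
]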
void
@@ -556,33 +542,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
+ unsigned long addr;
+ pgd_t *pgd;
+
BUG_ON(!vma->vm_mm->context);
- if ((end - start) < parisc_cache_flush_threshold) {
- if (vma->vm_mm->context == mfsp(3)) {
- flush_user_dcache_range_asm(start, end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(start, end);
- } else {
- unsigned long addr;
- pgd_t *pgd = vma->vm_mm->pgd;
-
- for (addr = start & PAGE_MASK; addr < end;
- addr += PAGE_SIZE) {
- pte_t *ptep = get_ptep(pgd, addr);
- if (ptep != NULL) {
- pte_t pte = *ptep;
- flush_cache_page(vma,
- addr, pte_pfn(pte));
- }
- }
- }
- } else {
-#ifdef CONFIG_SMP
+ if ((end - start) >= parisc_cache_flush_threshold) {
flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ return;
+ }
+
+ if (vma->vm_mm->context == mfsp(3)) {
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
+ return;
+ }
+
+ pgd = vma->vm_mm->pgd;
+ for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
@@ -591,9 +576,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{
BUG_ON(!vma->vm_mm->context);
- flush_tlb_page(vma, vmaddr);
- __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
-
+ if (pfn_valid(pfn)) {
+ flush_tlb_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
}
#ifdef CONFIG_PARISC_TMPALIAS
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index 37aabd772fb..d2d58258aea 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -195,6 +195,8 @@ common_stext:
ldw MEM_PDC_HI(%r0),%r6
depd %r6, 31, 32, %r3 /* move to upper word */
+ mfctl %cr30,%r6 /* PCX-W2 firmware bug */
+
ldo PDC_PSW(%r0),%arg0 /* 21 */
ldo PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
ldo PDC_PSW_WIDE_BIT(%r0),%arg2 /* 2 */
@@ -203,6 +205,8 @@ common_stext:
copy %r0,%arg3
stext_pdc_ret:
+ mtctl %r6,%cr30 /* restore task thread info */
+
/* restore rfi target address*/
ldd TI_TASK-THREAD_SZ_ALGN(%sp), %r10
tophys_r1 %r10
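[Note on the head.S change: it brackets the PDC_PSW firmware call with a save and restore of %cr30, which the kernel uses for the task thread-info pointer; the comment attributes the need to a PCX-W2 firmware bug, meaning that on those machines the PDC call apparently returns with %cr30 clobbered. %r6 is free to hold the saved value at that point, since its earlier MEM_PDC contents were already merged into %r3, and it is restored with mtctl at stext_pdc_ret.]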
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 3295ef4a185..f0b6722fc70 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -211,6 +211,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
/* REVISIT: who is the consumer of this? not sure yet... */
dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
dev->pmod_loc = pa_pdc_cell->mod_location;
+ dev->mod0 = pa_pdc_cell->mod[0];
register_parisc_device(dev); /* advertise device */
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 5dfd248e3f1..0d3a9d4927b 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -61,8 +61,15 @@ static int get_offset(struct address_space *mapping)
return (unsigned long) mapping >> 8;
}
-static unsigned long get_shared_area(struct address_space *mapping,
- unsigned long addr, unsigned long len, unsigned long pgoff)
+static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
+{
+ struct address_space *mapping = filp ? filp->f_mapping : NULL;
+
+ return (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+}
+
+static unsigned long get_shared_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff)
{
struct vm_unmapped_area_info info;
@@ -71,7 +78,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
info.low_limit = PAGE_ALIGN(addr);
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & (SHMLBA - 1);
- info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
+ info.align_offset = shared_align_offset(filp, pgoff);
return vm_unmapped_area(&info);
}
@@ -82,20 +89,18 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
return -ENOMEM;
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) &&
- (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+ (addr - shared_align_offset(filp, pgoff)) & (SHMLBA - 1))
return -EINVAL;
return addr;
}
if (!addr)
addr = TASK_UNMAPPED_BASE;
- if (filp) {
- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
- } else if(flags & MAP_SHARED) {
- addr = get_shared_area(NULL, addr, len, pgoff);
- } else {
+ if (filp || (flags & MAP_SHARED))
+ addr = get_shared_area(filp, addr, len, pgoff);
+ else
addr = get_unshared_area(addr, len);
- }
+
return addr;
}
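[Note on the hunk above: the refactor threads the struct file itself into get_shared_area() so that the SHMLBA colouring bias lives in one helper, shared_align_offset(), instead of being computed twice. That also tightens the MAP_FIXED validation, which previously tested addr against pgoff << PAGE_SHIFT alone and ignored the per-mapping get_offset() component, bringing it in line with how non-fixed mappings are placed. The bias reaches the core allocator through info.align_offset, and vm_unmapped_area() guarantees the address it returns matches that offset under align_mask, which is how every mapping of the same (mapping, pgoff) pair ends up on one dcache colour. An illustrative post-condition check, not part of the patch:

    unsigned long addr = vm_unmapped_area(&info);

    if (!IS_ERR_VALUE(addr))   /* the allocator honours the colouring */
            BUG_ON((addr & info.align_mask) !=
                   (info.align_offset & info.align_mask));
]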
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 04e47c6a456..b3f87a3b4bc 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -805,14 +805,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
else {
/*
- * The kernel should never fault on its own address space.
+ * The kernel should never fault on its own address space,
+ * unless pagefault_disable() was called before.
*/
- if (fault_space == 0)
+ if (fault_space == 0 && !in_atomic())
{
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
parisc_terminate("Kernel Fault", regs, code, fault_address);
-
}
}
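[Note on the traps.c change: the kernel stops treating every kernel-space fault as fatal. With the in_atomic() test, a fault taken while pagefault_disable() is in effect (which raises the preempt count and is what in_atomic() detects here) falls through to the normal fault path, where the exception tables turn it into an error return instead of a panic. A hedged sketch of the caller-side pattern this supports (peek_user_word() is a hypothetical example, not from the patch):

    #include <linux/uaccess.h>

    static int peek_user_word(const void __user *uaddr, unsigned long *val)
    {
            int ret;

            pagefault_disable();   /* anticipated fault: in_atomic() true */
            ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
            pagefault_enable();

            return ret ? -EFAULT : 0;   /* fault was fixed up, not fatal */
    }
]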