Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 286
-rw-r--r--  arch/arm64/mm/fault.c       |  31
-rw-r--r--  arch/arm64/mm/init.c        |  34
-rw-r--r--  arch/arm64/mm/kasan_init.c  |  57
-rw-r--r--  arch/arm64/mm/mmu.c         |  13
-rw-r--r--  arch/arm64/mm/proc.S        |   8
6 files changed, 92 insertions(+), 337 deletions(-)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index a53704406099..fb0908456a1f 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -33,113 +33,6 @@
#include <asm/cacheflush.h>
-static struct gen_pool *atomic_pool __ro_after_init;
-
-#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
-static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
-
-static int __init early_coherent_pool(char *p)
-{
- atomic_pool_size = memparse(p, &p);
- return 0;
-}
-early_param("coherent_pool", early_coherent_pool);
-
-static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
-{
- unsigned long val;
- void *ptr = NULL;
-
- if (!atomic_pool) {
- WARN(1, "coherent pool not initialised!\n");
- return NULL;
- }
-
- val = gen_pool_alloc(atomic_pool, size);
- if (val) {
- phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
-
- *ret_page = phys_to_page(phys);
- ptr = (void *)val;
- memset(ptr, 0, size);
- }
-
- return ptr;
-}
-
-static bool __in_atomic_pool(void *start, size_t size)
-{
- return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
-}
-
-static int __free_from_pool(void *start, size_t size)
-{
- if (!__in_atomic_pool(start, size))
- return 0;
-
- gen_pool_free(atomic_pool, (unsigned long)start, size);
-
- return 1;
-}
-
-void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flags, unsigned long attrs)
-{
- struct page *page;
- void *ptr, *coherent_ptr;
- pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
-
- size = PAGE_ALIGN(size);
-
- if (!gfpflags_allow_blocking(flags)) {
- struct page *page = NULL;
- void *addr = __alloc_from_pool(size, &page, flags);
-
- if (addr)
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
-
- return addr;
- }
-
- ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
- if (!ptr)
- goto no_mem;
-
- /* remove any dirty cache lines on the kernel alias */
- __dma_flush_area(ptr, size);
-
- /* create a coherent mapping */
- page = virt_to_page(ptr);
- coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
- prot, __builtin_return_address(0));
- if (!coherent_ptr)
- goto no_map;
-
- return coherent_ptr;
-
-no_map:
- dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
-no_mem:
- return NULL;
-}
-
-void arch_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, unsigned long attrs)
-{
- if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) {
- void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));
-
- vunmap(vaddr);
- dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
- }
-}
-
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
- dma_addr_t dma_addr)
-{
- return __phys_to_pfn(dma_to_phys(dev, dma_addr));
-}
-
pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
unsigned long attrs)
{
@@ -160,6 +53,11 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
__dma_unmap_area(phys_to_virt(paddr), size, dir);
}
+void arch_dma_prep_coherent(struct page *page, size_t size)
+{
+ __dma_flush_area(page_address(page), size);
+}
+
#ifdef CONFIG_IOMMU_DMA
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
struct page *page, size_t size)
@@ -191,167 +89,13 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
}
#endif /* CONFIG_IOMMU_DMA */
-static int __init atomic_pool_init(void)
-{
- pgprot_t prot = __pgprot(PROT_NORMAL_NC);
- unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
- struct page *page;
- void *addr;
- unsigned int pool_size_order = get_order(atomic_pool_size);
-
- if (dev_get_cma_area(NULL))
- page = dma_alloc_from_contiguous(NULL, nr_pages,
- pool_size_order, false);
- else
- page = alloc_pages(GFP_DMA32, pool_size_order);
-
- if (page) {
- int ret;
- void *page_addr = page_address(page);
-
- memset(page_addr, 0, atomic_pool_size);
- __dma_flush_area(page_addr, atomic_pool_size);
-
- atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
- if (!atomic_pool)
- goto free_page;
-
- addr = dma_common_contiguous_remap(page, atomic_pool_size,
- VM_USERMAP, prot, atomic_pool_init);
-
- if (!addr)
- goto destroy_genpool;
-
- ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
- page_to_phys(page),
- atomic_pool_size, -1);
- if (ret)
- goto remove_mapping;
-
- gen_pool_set_algo(atomic_pool,
- gen_pool_first_fit_order_align,
- NULL);
-
- pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
- atomic_pool_size / 1024);
- return 0;
- }
- goto out;
-
-remove_mapping:
- dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
-destroy_genpool:
- gen_pool_destroy(atomic_pool);
- atomic_pool = NULL;
-free_page:
- if (!dma_release_from_contiguous(NULL, page, nr_pages))
- __free_pages(page, pool_size_order);
-out:
- pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
- atomic_pool_size / 1024);
- return -ENOMEM;
-}
-
-/********************************************
- * The following APIs are for dummy DMA ops *
- ********************************************/
-
-static void *__dummy_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
-{
- return NULL;
-}
-
-static void __dummy_free(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
-{
-}
-
-static int __dummy_mmap(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- return -ENXIO;
-}
-
-static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- return 0;
-}
-
-static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
-
-static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return 0;
-}
-
-static void __dummy_unmap_sg(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
-}
-
-static void __dummy_sync_single(struct device *dev,
- dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir)
-{
-}
-
-static void __dummy_sync_sg(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
-{
-}
-
-static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
- return 1;
-}
-
-static int __dummy_dma_supported(struct device *hwdev, u64 mask)
-{
- return 0;
-}
-
-const struct dma_map_ops dummy_dma_ops = {
- .alloc = __dummy_alloc,
- .free = __dummy_free,
- .mmap = __dummy_mmap,
- .map_page = __dummy_map_page,
- .unmap_page = __dummy_unmap_page,
- .map_sg = __dummy_map_sg,
- .unmap_sg = __dummy_unmap_sg,
- .sync_single_for_cpu = __dummy_sync_single,
- .sync_single_for_device = __dummy_sync_single,
- .sync_sg_for_cpu = __dummy_sync_sg,
- .sync_sg_for_device = __dummy_sync_sg,
- .mapping_error = __dummy_mapping_error,
- .dma_supported = __dummy_dma_supported,
-};
-EXPORT_SYMBOL(dummy_dma_ops);
-
static int __init arm64_dma_init(void)
{
WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
TAINT_CPU_OUT_OF_SPEC,
"ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
ARCH_DMA_MINALIGN, cache_line_size());
-
- return atomic_pool_init();
+ return dma_atomic_pool_init(GFP_DMA32, __pgprot(PROT_NORMAL_NC));
}
arch_initcall(arm64_dma_init);
@@ -397,17 +141,17 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
page = alloc_pages(gfp, get_order(size));
addr = page ? page_address(page) : NULL;
} else {
- addr = __alloc_from_pool(size, &page, gfp);
+ addr = dma_alloc_from_pool(size, &page, gfp);
}
if (!addr)
return NULL;
*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
- if (iommu_dma_mapping_error(dev, *handle)) {
+ if (*handle == DMA_MAPPING_ERROR) {
if (coherent)
__free_pages(page, get_order(size));
else
- __free_from_pool(addr, size);
+ dma_free_from_pool(addr, size);
addr = NULL;
}
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
@@ -420,7 +164,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
return NULL;
*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
- if (iommu_dma_mapping_error(dev, *handle)) {
+ if (*handle == DMA_MAPPING_ERROR) {
dma_release_from_contiguous(dev, page,
size >> PAGE_SHIFT);
return NULL;
@@ -471,9 +215,9 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
* coherent devices.
* Hence how dodgy the below logic looks...
*/
- if (__in_atomic_pool(cpu_addr, size)) {
+ if (dma_in_atomic_pool(cpu_addr, size)) {
iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
- __free_from_pool(cpu_addr, size);
+ dma_free_from_pool(cpu_addr, size);
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
struct page *page = vmalloc_to_page(cpu_addr);
@@ -580,7 +324,7 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
- !iommu_dma_mapping_error(dev, dev_addr))
+ dev_addr != DMA_MAPPING_ERROR)
__dma_map_area(page_address(page) + offset, size, dir);
return dev_addr;
@@ -663,7 +407,6 @@ static const struct dma_map_ops iommu_dma_ops = {
.sync_sg_for_device = __iommu_sync_sg_for_device,
.map_resource = iommu_dma_map_resource,
.unmap_resource = iommu_dma_unmap_resource,
- .mapping_error = iommu_dma_mapping_error,
};
static int __init __iommu_dma_init(void)
@@ -719,9 +462,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
- if (!dev->dma_ops)
- dev->dma_ops = &swiotlb_dma_ops;
-
dev->dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5fe6d2e40e9b..efb7b2cbead5 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -40,6 +40,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/kasan.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
@@ -132,6 +133,18 @@ static void mem_abort_decode(unsigned int esr)
data_abort_decode(esr);
}
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+ /* entry assembly clears tags for TTBR0 addrs */
+ return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+ /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+ return arch_kasan_reset_tag(addr) >= VA_START;
+}
+
/*
* Dump out the page tables associated with 'addr' in the currently active mm.
*/
@@ -141,7 +154,7 @@ void show_pte(unsigned long addr)
pgd_t *pgdp;
pgd_t pgd;
- if (addr < TASK_SIZE) {
+ if (is_ttbr0_addr(addr)) {
/* TTBR0 */
mm = current->active_mm;
if (mm == &init_mm) {
@@ -149,7 +162,7 @@ void show_pte(unsigned long addr)
addr);
return;
}
- } else if (addr >= VA_START) {
+ } else if (is_ttbr1_addr(addr)) {
/* TTBR1 */
mm = &init_mm;
} else {
@@ -254,7 +267,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
if (fsc_type == ESR_ELx_FSC_PERM)
return true;
- if (addr < TASK_SIZE && system_uses_ttbr0_pan())
+ if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
return fsc_type == ESR_ELx_FSC_FAULT &&
(regs->pstate & PSR_PAN_BIT);
@@ -319,7 +332,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
* type", so we ignore this wrinkle and just return the translation
* fault.)
*/
- if (current->thread.fault_address >= TASK_SIZE) {
+ if (!is_ttbr0_addr(current->thread.fault_address)) {
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_DABT_LOW:
/*
@@ -455,7 +468,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < TASK_SIZE && is_el1_permission_fault(addr, esr, regs)) {
+ if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die_kernel_fault("access to user memory with fs=KERNEL_DS",
@@ -603,7 +616,7 @@ static int __kprobes do_translation_fault(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
{
- if (addr < TASK_SIZE)
+ if (is_ttbr0_addr(addr))
return do_page_fault(addr, esr, regs);
do_bad_area(addr, esr, regs);
@@ -758,7 +771,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
* re-enabled IRQs. If the address is a kernel address, apply
* BP hardening prior to enabling IRQs and pre-emption.
*/
- if (addr > TASK_SIZE)
+ if (!is_ttbr0_addr(addr))
arm64_apply_bp_hardening();
local_daif_restore(DAIF_PROCCTX);
@@ -771,7 +784,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
struct pt_regs *regs)
{
if (user_mode(regs)) {
- if (instruction_pointer(regs) > TASK_SIZE)
+ if (!is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
local_daif_restore(DAIF_PROCCTX);
}
@@ -825,7 +838,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
if (interrupts_enabled(regs))
trace_hardirqs_off();
- if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
+ if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
if (!inf->fn(addr, esr, regs)) {
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index cbba537ba3d2..a8f2e4792ef9 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -63,24 +63,6 @@ EXPORT_SYMBOL(memstart_addr);
phys_addr_t arm64_dma_phys_limit __ro_after_init;
-#ifdef CONFIG_BLK_DEV_INITRD
-static int __init early_initrd(char *p)
-{
- unsigned long start, size;
- char *endp;
-
- start = memparse(p, &endp);
- if (*endp == ',') {
- size = memparse(endp + 1, NULL);
-
- initrd_start = start;
- initrd_end = start + size;
- }
- return 0;
-}
-early_param("initrd", early_initrd);
-#endif
-
#ifdef CONFIG_KEXEC_CORE
/*
* reserve_crashkernel() - reserves memory for crash kernel
@@ -417,14 +399,14 @@ void __init arm64_memblock_init(void)
memblock_add(__pa_symbol(_text), (u64)(_end - _text));
}
- if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
/*
* Add back the memory we just removed if it results in the
* initrd to become inaccessible via the linear mapping.
* Otherwise, this is a no-op
*/
- u64 base = initrd_start & PAGE_MASK;
- u64 size = PAGE_ALIGN(initrd_end) - base;
+ u64 base = phys_initrd_start & PAGE_MASK;
+ u64 size = PAGE_ALIGN(phys_initrd_size);
/*
* We can only add back the initrd memory if we don't end up
@@ -468,15 +450,11 @@ void __init arm64_memblock_init(void)
* pagetables with memblock.
*/
memblock_reserve(__pa_symbol(_text), _end - _text);
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_start) {
- memblock_reserve(initrd_start, initrd_end - initrd_start);
-
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
/* the generic initrd code expects virtual addresses */
- initrd_start = __phys_to_virt(initrd_start);
- initrd_end = __phys_to_virt(initrd_end);
+ initrd_start = __phys_to_virt(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
}
-#endif
early_init_fdt_scan_reserved_mem();
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 63527e585aac..4b55b15707a3 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -39,7 +39,15 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
- MEMBLOCK_ALLOC_ACCESSIBLE, node);
+ MEMBLOCK_ALLOC_KASAN, node);
+ return __pa(p);
+}
+
+static phys_addr_t __init kasan_alloc_raw_page(int node)
+{
+ void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_KASAN, node);
return __pa(p);
}
@@ -47,8 +55,9 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
bool early)
{
if (pmd_none(READ_ONCE(*pmdp))) {
- phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
- : kasan_alloc_zeroed_page(node);
+ phys_addr_t pte_phys = early ?
+ __pa_symbol(kasan_early_shadow_pte)
+ : kasan_alloc_zeroed_page(node);
__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
}
@@ -60,8 +69,9 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
bool early)
{
if (pud_none(READ_ONCE(*pudp))) {
- phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
- : kasan_alloc_zeroed_page(node);
+ phys_addr_t pmd_phys = early ?
+ __pa_symbol(kasan_early_shadow_pmd)
+ : kasan_alloc_zeroed_page(node);
__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
}
@@ -72,8 +82,9 @@ static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
bool early)
{
if (pgd_none(READ_ONCE(*pgdp))) {
- phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
- : kasan_alloc_zeroed_page(node);
+ phys_addr_t pud_phys = early ?
+ __pa_symbol(kasan_early_shadow_pud)
+ : kasan_alloc_zeroed_page(node);
__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
}
@@ -87,8 +98,11 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
do {
- phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
- : kasan_alloc_zeroed_page(node);
+ phys_addr_t page_phys = early ?
+ __pa_symbol(kasan_early_shadow_page)
+ : kasan_alloc_raw_page(node);
+ if (!early)
+ memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
next = addr + PAGE_SIZE;
set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
@@ -205,14 +219,14 @@ void __init kasan_init(void)
kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
- kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
- (void *)mod_shadow_start);
- kasan_populate_zero_shadow((void *)kimg_shadow_end,
- kasan_mem_to_shadow((void *)PAGE_OFFSET));
+ kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+ (void *)mod_shadow_start);
+ kasan_populate_early_shadow((void *)kimg_shadow_end,
+ kasan_mem_to_shadow((void *)PAGE_OFFSET));
if (kimg_shadow_start > mod_shadow_end)
- kasan_populate_zero_shadow((void *)mod_shadow_end,
- (void *)kimg_shadow_start);
+ kasan_populate_early_shadow((void *)mod_shadow_end,
+ (void *)kimg_shadow_start);
for_each_memblock(memory, reg) {
void *start = (void *)__phys_to_virt(reg->base);
@@ -227,16 +241,19 @@ void __init kasan_init(void)
}
/*
- * KAsan may reuse the contents of kasan_zero_pte directly, so we
- * should make sure that it maps the zero page read-only.
+ * KAsan may reuse the contents of kasan_early_shadow_pte directly,
+ * so we should make sure that it maps the zero page read-only.
*/
for (i = 0; i < PTRS_PER_PTE; i++)
- set_pte(&kasan_zero_pte[i],
- pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+ set_pte(&kasan_early_shadow_pte[i],
+ pfn_pte(sym_to_pfn(kasan_early_shadow_page),
+ PAGE_KERNEL_RO));
- memset(kasan_zero_page, 0, PAGE_SIZE);
+ memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+ kasan_init_tags();
+
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
pr_info("KernelAddressSanitizer initialized\n");
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index da513a1facf4..b6f5aa52ac67 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1003,10 +1003,8 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
pmd = READ_ONCE(*pmdp);
- if (!pmd_present(pmd))
- return 1;
if (!pmd_table(pmd)) {
- VM_WARN_ON(!pmd_table(pmd));
+ VM_WARN_ON(1);
return 1;
}
@@ -1026,10 +1024,8 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
pud = READ_ONCE(*pudp);
- if (!pud_present(pud))
- return 1;
if (!pud_table(pud)) {
- VM_WARN_ON(!pud_table(pud));
+ VM_WARN_ON(1);
return 1;
}
@@ -1047,6 +1043,11 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
return 1;
}
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+ return 0; /* Don't attempt a block mapping */
+}
+
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
bool want_memblock)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index e05b3ce1db6b..73886a5f1f30 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -47,6 +47,12 @@
/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
+#ifdef CONFIG_KASAN_SW_TAGS
+#define TCR_KASAN_FLAGS TCR_TBI1
+#else
+#define TCR_KASAN_FLAGS 0
+#endif
+
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
/*
@@ -449,7 +455,7 @@ ENTRY(__cpu_setup)
*/
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
- TCR_TBI0 | TCR_A1
+ TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
#ifdef CONFIG_ARM64_USER_VA_BITS_52
ldr_l x9, vabits_user