Diffstat (limited to 'arch/x86')
 arch/x86/xen/mmu.c     | 60 ++++++++++++++++++++++++++++++++++-----------
 arch/x86/xen/xen-pvh.S | 47 ++++++++++++++++++++++++++-------
 2 files changed, 80 insertions(+), 27 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 2d76106788a3..96fc2f0fdbfe 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -63,37 +63,44 @@ static noinline void xen_flush_tlb_all(void)
#define REMAP_BATCH_SIZE 16
struct remap_data {
- xen_pfn_t *mfn;
+ xen_pfn_t *pfn;
bool contiguous;
+ bool no_translate;
pgprot_t prot;
struct mmu_update *mmu_update;
};
-static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
+static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
unsigned long addr, void *data)
{
struct remap_data *rmd = data;
- pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
+ pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));
- /* If we have a contiguous range, just update the mfn itself,
- else update pointer to be "next mfn". */
+ /*
+ * If we have a contiguous range, just update the pfn itself,
+ * else update pointer to be "next pfn".
+ */
if (rmd->contiguous)
- (*rmd->mfn)++;
+ (*rmd->pfn)++;
else
- rmd->mfn++;
+ rmd->pfn++;
- rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+ rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
+ rmd->mmu_update->ptr |= rmd->no_translate ?
+ MMU_PT_UPDATE_NO_TRANSLATE :
+ MMU_NORMAL_PT_UPDATE;
rmd->mmu_update->val = pte_val_ma(pte);
rmd->mmu_update++;
return 0;
}
-static int do_remap_gfn(struct vm_area_struct *vma,
+static int do_remap_pfn(struct vm_area_struct *vma,
unsigned long addr,
- xen_pfn_t *gfn, int nr,
+ xen_pfn_t *pfn, int nr,
int *err_ptr, pgprot_t prot,
- unsigned domid,
+ unsigned int domid,
+ bool no_translate,
struct page **pages)
{
int err = 0;
@@ -104,11 +111,14 @@ static int do_remap_gfn(struct vm_area_struct *vma,
BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
- rmd.mfn = gfn;
+ rmd.pfn = pfn;
rmd.prot = prot;
- /* We use the err_ptr to indicate if there we are doing a contiguous
- * mapping or a discontigious mapping. */
+ /*
+ * We use the err_ptr to indicate whether we are doing a contiguous
+ * mapping or a discontiguous mapping.
+ */
rmd.contiguous = !err_ptr;
+ rmd.no_translate = no_translate;
while (nr) {
int index = 0;
@@ -119,7 +129,7 @@ static int do_remap_gfn(struct vm_area_struct *vma,
rmd.mmu_update = mmu_update;
err = apply_to_page_range(vma->vm_mm, addr, range,
- remap_area_mfn_pte_fn, &rmd);
+ remap_area_pfn_pte_fn, &rmd);
if (err)
goto out;
@@ -173,7 +183,8 @@ int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
if (xen_feature(XENFEAT_auto_translated_physmap))
return -EOPNOTSUPP;
- return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
+ return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
+ pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
@@ -192,10 +203,25 @@ int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
* cause of "wrong memory was mapped in".
*/
BUG_ON(err_ptr == NULL);
- return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
+ return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
+ false, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+ unsigned long addr,
+ xen_pfn_t *mfn, int nr,
+ int *err_ptr, pgprot_t prot,
+ unsigned int domid, struct page **pages)
+{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return -EOPNOTSUPP;
+
+ return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
+ true, pages);
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+
/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
int nr, struct page **pages)
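
The hunk above exports xen_remap_domain_mfn_array(), which reuses do_remap_pfn() with no_translate set so the passed-in frame numbers are treated as raw MFNs. A minimal sketch of how a PV-side caller might drive the new interface follows; the function name example_map_mfns and the vma/mfn plumbing are illustrative assumptions, not part of this patch.

#include <linux/slab.h>
#include <linux/mm.h>
#include <xen/xen-ops.h>

/*
 * Hypothetical caller sketch (not in this patch): map nr raw machine
 * frames into a userspace VMA. Passing a non-NULL err_ptr selects the
 * discontiguous path, so each mfns[i] is mapped individually and
 * reports its status into errs[i]; on success the remap call returns
 * the number of frames mapped.
 */
static int example_map_mfns(struct vm_area_struct *vma, xen_pfn_t *mfns,
			    int nr, unsigned int domid, struct page **pages)
{
	int *errs, mapped, rc = 0, i;

	errs = kcalloc(nr, sizeof(*errs), GFP_KERNEL);
	if (!errs)
		return -ENOMEM;

	mapped = xen_remap_domain_mfn_array(vma, vma->vm_start, mfns, nr,
					    errs, vma->vm_page_prot,
					    domid, pages);
	if (mapped < 0) {
		rc = mapped;
	} else {
		/* Surface the first per-frame failure, if any. */
		for (i = 0; i < nr; i++) {
			if (errs[i]) {
				rc = errs[i];
				break;
			}
		}
	}

	kfree(errs);
	return rc;
}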
diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
index e1a5fbeae08d..ca2d3b2bf2af 100644
--- a/arch/x86/xen/xen-pvh.S
+++ b/arch/x86/xen/xen-pvh.S
@@ -54,12 +54,19 @@
* charge of setting up its own stack, GDT and IDT.
*/
+#define PVH_GDT_ENTRY_CS 1
+#define PVH_GDT_ENTRY_DS 2
+#define PVH_GDT_ENTRY_CANARY 3
+#define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8)
+#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
+#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)
+
ENTRY(pvh_start_xen)
cld
lgdt (_pa(gdt))
- mov $(__BOOT_DS),%eax
+ mov $PVH_DS_SEL,%eax
mov %eax,%ds
mov %eax,%es
mov %eax,%ss
@@ -93,11 +100,17 @@ ENTRY(pvh_start_xen)
mov %eax, %cr0
/* Jump to 64-bit mode. */
- ljmp $__KERNEL_CS, $_pa(1f)
+ ljmp $PVH_CS_SEL, $_pa(1f)
/* 64-bit entry point. */
.code64
1:
+ /* Set base address in stack canary descriptor. */
+ mov $MSR_GS_BASE,%ecx
+ mov $_pa(canary), %eax
+ xor %edx, %edx
+ wrmsr
+
call xen_prepare_pvh
/* startup_64 expects boot_params in %rsi. */
@@ -107,6 +120,17 @@ ENTRY(pvh_start_xen)
#else /* CONFIG_X86_64 */
+ /* Set base address in stack canary descriptor. */
+ movl $_pa(gdt_start),%eax
+ movl $_pa(canary),%ecx
+ movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
+ shrl $16, %ecx
+ movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
+ movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)
+
+ mov $PVH_CANARY_SEL,%eax
+ mov %eax,%gs
+
call mk_early_pgtbl_32
mov $_pa(initial_page_table), %eax
@@ -116,13 +140,13 @@ ENTRY(pvh_start_xen)
or $(X86_CR0_PG | X86_CR0_PE), %eax
mov %eax, %cr0
- ljmp $__BOOT_CS, $1f
+ ljmp $PVH_CS_SEL, $1f
1:
call xen_prepare_pvh
mov $_pa(pvh_bootparams), %esi
/* startup_32 doesn't expect paging and PAE to be on. */
- ljmp $__BOOT_CS, $_pa(2f)
+ ljmp $PVH_CS_SEL, $_pa(2f)
2:
mov %cr0, %eax
and $~X86_CR0_PG, %eax
@@ -131,7 +155,7 @@ ENTRY(pvh_start_xen)
and $~X86_CR4_PAE, %eax
mov %eax, %cr4
- ljmp $__BOOT_CS, $_pa(startup_32)
+ ljmp $PVH_CS_SEL, $_pa(startup_32)
#endif
END(pvh_start_xen)
@@ -143,16 +167,19 @@ gdt:
.word 0
gdt_start:
.quad 0x0000000000000000 /* NULL descriptor */
- .quad 0x0000000000000000 /* reserved */
#ifdef CONFIG_X86_64
- .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* __KERNEL_CS */
+ .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */
#else
- .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* __KERNEL_CS */
+ .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */
#endif
- .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* __KERNEL_DS */
+ .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
+ .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */
gdt_end:
- .balign 4
+ .balign 16
+canary:
+ .fill 48, 1, 0
+
early_stack:
.fill 256, 1, 0
early_stack_end:
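
For reference, the canary descriptor added above, GDT_ENTRY(0x4090, 0, 0x18), encodes a present, 32-bit, byte-granular data segment with base 0 and a 0x18-byte limit. The base is left 0 at build time because _pa(canary) is only known at boot, which is why the 32-bit entry path patches descriptor bytes 2-4 and 7 before loading %gs. The sketch below is a standalone user-space C program, assuming the descriptor packing of the kernel's GDT_ENTRY() macro and a little-endian host; the addr value is a stand-in for _pa(canary), not a real address.

#include <stdint.h>
#include <stdio.h>

/*
 * Same packing as the kernel's GDT_ENTRY(): limit[15:0] in bits 0-15,
 * base[23:0] in bits 16-39, the access byte in bits 40-47, limit[19:16]
 * in bits 48-51, flag nibble in bits 52-55, base[31:24] in bits 56-63.
 */
static uint64_t gdt_entry(uint64_t flags, uint64_t base, uint64_t limit)
{
	return ((base  & 0xff000000ULL) << (56 - 24)) |
	       ((flags & 0x0000f0ffULL) << 40) |
	       ((limit & 0x000f0000ULL) << (48 - 16)) |
	       ((base  & 0x00ffffffULL) << 16) |
	        (limit & 0x0000ffffULL);
}

int main(void)
{
	uint64_t desc = gdt_entry(0x4090, 0, 0x18);	/* canary entry */
	uint8_t *b = (uint8_t *)&desc;
	uint32_t addr = 0x00123456;	/* stand-in for _pa(canary) */

	/* Mirrors the movw/movb sequence in the 32-bit entry path: */
	b[2] = addr & 0xff;		/* movw %cx, +2 writes bytes 2..3 */
	b[3] = (addr >> 8) & 0xff;
	b[4] = (addr >> 16) & 0xff;	/* movb %cl, +4 after shrl $16 */
	b[7] = (addr >> 24) & 0xff;	/* movb %ch, +7 */

	printf("patched descriptor: %#018llx\n", (unsigned long long)desc);
	return 0;
}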