aboutsummaryrefslogtreecommitdiff
path: root/arch/x86/mm/mem_encrypt_boot.S
blob: 730e6d541df1d6c172f553032bc00d8b0f5b312a (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>

	.text
	.code64
ENTRY(sme_encrypt_execute)

	/*
	 * Entry parameters:
	 *   RDI - virtual address for the encrypted kernel mapping
	 *   RSI - virtual address for the decrypted kernel mapping
	 *   RDX - length of kernel
	 *   RCX - virtual address of the encryption workarea, including:
	 *     - stack page (PAGE_SIZE)
	 *     - encryption routine page (PAGE_SIZE)
	 *     - intermediate copy buffer (PMD_PAGE_SIZE)
	 *    R8 - physcial address of the pagetables to use for encryption
	 */

	push	%rbp
	movq	%rsp, %rbp		/* RBP now has original stack pointer */

	/* Set up a one page stack in the non-encrypted memory area */
	movq	%rcx, %rax		/* Workarea stack page */
	leaq	PAGE_SIZE(%rax), %rsp	/* Set new stack pointer */
	addq	$PAGE_SIZE, %rax	/* Workarea encryption routine */

	push	%r12
	movq	%rdi, %r10		/* Encrypted kernel */
	movq	%rsi, %r11		/* Decrypted kernel */
	movq	%rdx, %r12		/* Kernel length */

	/* Copy encryption routine into the workarea */
	movq	%rax, %rdi				/* Workarea encryption routine */
	leaq	__enc_copy(%rip), %rsi			/* Encryption routine */
	movq	$(.L__enc_copy_end - __enc_copy), %rcx	/* Encryption routine length */
	rep	movsb

	/* Setup registers for call */
	movq	%r10, %rdi		/* Encrypted kernel */
	movq	%r11, %rsi		/* Decrypted kernel */
	movq	%r8, %rdx		/* Pagetables used for encryption */
	movq	%r12, %rcx		/* Kernel length */
	movq	%rax, %r8		/* Workarea encryption routine */
	addq	$PAGE_SIZE, %r8		/* Workarea intermediate copy buffer */

	call	*%rax			/* Call the encryption routine */

	pop	%r12

	movq	%rbp, %rsp		/* Restore original stack pointer */
	pop	%rbp

	ret
ENDPROC(sme_encrypt_execute)

ENTRY(__enc_copy)
/*
 * Routine used to encrypt kernel.
 *   This routine must be run outside of the kernel proper since
 *   the kernel will be encrypted during the process. So this
 *   routine is defined here and then copied to an area outside
 *   of the kernel where it will remain and run decrypted
 *   during execution.
 *
 *   On entry the registers must be:
 *     RDI - virtual address for the encrypted kernel mapping
 *     RSI - virtual address for the decrypted kernel mapping
 *     RDX - address of the pagetables to use for encryption
 *     RCX - length of kernel
 *      R8 - intermediate copy buffer
 *
 *     RAX - points to this routine
 *
 * The kernel will be encrypted by copying from the non-encrypted
 * kernel space to an intermediate buffer and then copying from the
 * intermediate buffer back to the encrypted kernel space. The physical
 * addresses of the two kernel space mappings are the same which
 * results in the kernel being encrypted "in place".
 */
	/* Enable the new page tables */
	mov	%rdx, %cr3

	/* Flush any global TLBs */
	mov	%cr4, %rdx
	andq	$~X86_CR4_PGE, %rdx
	mov	%rdx, %cr4
	orq	$X86_CR4_PGE, %rdx
	mov	%rdx, %cr4

	/* Set the PAT register PA5 entry to write-protect */
	push	%rcx
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	push	%rdx			/* Save original PAT value */
	andl	$0xffff00ff, %edx	/* Clear PA5 */
	orl	$0x00000500, %edx	/* Set PA5 to WP */
	wrmsr
	pop	%rdx			/* RDX contains original PAT value */
	pop	%rcx

	movq	%rcx, %r9		/* Save kernel length */
	movq	%rdi, %r10		/* Save encrypted kernel address */
	movq	%rsi, %r11		/* Save decrypted kernel address */

	wbinvd				/* Invalidate any cache entries */

	/* Copy/encrypt 2MB at a time */
1:
	movq	%r11, %rsi		/* Source - decrypted kernel */
	movq	%r8, %rdi		/* Dest   - intermediate copy buffer */
	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
	rep	movsb

	movq	%r8, %rsi		/* Source - intermediate copy buffer */
	movq	%r10, %rdi		/* Dest   - encrypted kernel */
	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
	rep	movsb

	addq	$PMD_PAGE_SIZE, %r11
	addq	$PMD_PAGE_SIZE, %r10
	subq	$PMD_PAGE_SIZE, %r9	/* Kernel length decrement */
	jnz	1b			/* Kernel length not zero? */

	/* Restore PAT register */
	push	%rdx			/* Save original PAT value */
	movl	$MSR_IA32_CR_PAT, %ecx
	rdmsr
	pop	%rdx			/* Restore original PAT value */
	wrmsr

	ret
.L__enc_copy_end:
ENDPROC(__enc_copy)