path: root/tools/testing/selftests/kvm/s390x/tprot.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test TEST PROTECTION emulation.
 *
 * Copyright IBM Corp. 2021
 */

#include <sys/mman.h>
#include "test_util.h"
#include "kvm_util.h"

#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
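/*
 * s390 numbers control register bits from the MSB (bit 0) to the LSB
 * (bit 63); architected bit n therefore corresponds to shift (63 - n).
 */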
#define CR0_FETCH_PROTECTION_OVERRIDE	(1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE	(1UL << (63 - 39))

#define VCPU_ID 1

static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
static uint8_t *const page_store_prot = pages[0];
static uint8_t *const page_fetch_prot = pages[1];

/*
 * Set the storage key of the page containing addr to key.
 * LRA translates addr to a real address; on success, SSKE sets the key.
 * A nonzero return value indicates that the address is not mapped.
 */
static int set_storage_key(void *addr, uint8_t key)
{
	int not_mapped = 0;

	asm volatile (
		       "lra	%[addr], 0(0,%[addr])\n"
		"	jz	0f\n"
		"	llill	%[not_mapped],1\n"
		"	j	1f\n"
		"0:	sske	%[key], %[addr]\n"
		"1:"
		: [addr] "+&a" (addr), [not_mapped] "+r" (not_mapped)
		: [key] "r" (key)
		: "cc"
	);
	return -not_mapped;
}

enum permission {
	READ_WRITE = 0,		/* TPROT CC 0: fetch and store permitted */
	READ = 1,		/* TPROT CC 1: fetch permitted, store protected */
	RW_PROTECTED = 2,	/* TPROT CC 2: fetch and store protected */
	TRANSL_UNAVAIL = 3,	/* TPROT CC 3: translation not available */
};

static enum permission test_protection(void *addr, uint8_t key)
{
	uint64_t mask;

	asm volatile (
		/* TPROT: the access key sits in bits 56-59 of the second-operand address */
		       "tprot	%[addr], 0(%[key])\n"
		/* IPM: copy the condition code into bits 29:28 of %[mask] */
		"	ipm	%[mask]\n"
		: [mask] "=r" (mask)
		: [addr] "Q" (*(char *)addr),
		  [key] "a" (key)
		: "cc"
	);

	/* The condition code maps directly onto enum permission; mask off unrelated bits */
	return (enum permission)((mask >> 28) & 0x3);
}

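/*
 * Stage values are exchanged between guest (GUEST_SYNC) and host (HOST_SYNC)
 * so that both sides take turns setting up the test conditions listed below.
 */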
enum stage {
	STAGE_END,
	STAGE_INIT_SIMPLE,
	TEST_SIMPLE,
	STAGE_INIT_FETCH_PROT_OVERRIDE,
	TEST_FETCH_PROT_OVERRIDE,
	TEST_STORAGE_PROT_OVERRIDE,
};

struct test {
	enum stage stage;
	void *addr;
	uint8_t key;
	enum permission expected;
} tests[] = {
	/*
	 * We perform each test in the array by executing TEST PROTECTION on
	 * the specified addr with the specified key and checking if the returned
	 * permissions match the expected value.
	 * Both guest and host cooperate to set up the required test conditions.
	 * A central condition is that the page targeted by addr has to be DAT
	 * protected in the host mappings, in order for KVM to emulate the
	 * TEST PROTECTION instruction.
	 * Since the page tables are shared, the host uses mprotect to achieve
	 * this.
	 *
	 * Tests resulting in RW_PROTECTED/TRANSL_UNAVAIL are interpreted
	 * by SIE, not KVM, but there is no harm in testing them also.
	 * See Enhanced Suppression-on-Protection Facilities in the
	 * Interpretive-Execution Mode.
	 */
	/*
	 * guest: set storage key of page_store_prot to 1
	 *        storage key of page_fetch_prot to 9 and enable
	 *        protection for it
	 * STAGE_INIT_SIMPLE
	 * host: write protect both via mprotect
	 */
	/* access key 0 matches any storage key -> RW */
	{ TEST_SIMPLE, page_store_prot, 0x00, READ_WRITE },
	/* access key matches storage key -> RW */
	{ TEST_SIMPLE, page_store_prot, 0x10, READ_WRITE },
	/* mismatched keys, but no fetch protection -> RO */
	{ TEST_SIMPLE, page_store_prot, 0x20, READ },
	/* access key 0 matches any storage key -> RW */
	{ TEST_SIMPLE, page_fetch_prot, 0x00, READ_WRITE },
	/* access key matches storage key -> RW */
	{ TEST_SIMPLE, page_fetch_prot, 0x90, READ_WRITE },
	/* mismatched keys, fetch protection -> inaccessible */
	{ TEST_SIMPLE, page_fetch_prot, 0x10, RW_PROTECTED },
	/* page 0 not mapped yet -> translation not available */
	{ TEST_SIMPLE, (void *)0x00, 0x10, TRANSL_UNAVAIL },
	/*
	 * host: try to map page 0
	 * guest: set storage key of page 0 to 9 and enable fetch protection
	 * STAGE_INIT_FETCH_PROT_OVERRIDE
	 * host: write protect page 0
	 *       enable fetch protection override
	 */
	/* mismatched keys, fetch protection, but override applies -> RO */
	{ TEST_FETCH_PROT_OVERRIDE, (void *)0x00, 0x10, READ },
	/* mismatched keys, fetch protection, override applies to 0-2048 only -> inaccessible */
	{ TEST_FETCH_PROT_OVERRIDE, (void *)2049, 0x10, RW_PROTECTED },
	/*
	 * host: enable storage protection override
	 */
	/* mismatched keys, but override applies (storage key 9) -> RW */
	{ TEST_STORAGE_PROT_OVERRIDE, page_fetch_prot, 0x10, READ_WRITE },
	/* mismatched keys, no fetch protection, override doesn't apply -> RO */
	{ TEST_STORAGE_PROT_OVERRIDE, page_store_prot, 0x20, READ },
	/* mismatched keys, but override applies (storage key 9) -> RW */
	{ TEST_STORAGE_PROT_OVERRIDE, (void *)2049, 0x10, READ_WRITE },
	/* end marker */
	{ STAGE_END, 0, 0, 0 },
};

static enum stage perform_next_stage(int *i, bool mapped_0)
{
	enum stage stage = tests[*i].stage;
	enum permission result;
	bool skip;

	for (; tests[*i].stage == stage; (*i)++) {
		/*
		 * Some fetch protection override tests require that page 0
		 * be mapped; however, when the host tries to map that page via
		 * vm_vaddr_alloc, it may happen that some other page gets
		 * mapped instead.
		 * In order to skip these tests, we detect this inside the
		 * guest.
		 */
		skip = tests[*i].addr < (void *)4096 &&
		       tests[*i].expected != TRANSL_UNAVAIL &&
		       !mapped_0;
		if (!skip) {
			result = test_protection(tests[*i].addr, tests[*i].key);
			GUEST_ASSERT_2(result == tests[*i].expected, *i, result);
		}
	}
	return stage;
}

static void guest_code(void)
{
	bool mapped_0;
	int i = 0;

	GUEST_ASSERT_EQ(set_storage_key(page_store_prot, 0x10), 0);
	GUEST_ASSERT_EQ(set_storage_key(page_fetch_prot, 0x98), 0);
	GUEST_SYNC(STAGE_INIT_SIMPLE);
	GUEST_SYNC(perform_next_stage(&i, false));

	/* Fetch-protection override */
	mapped_0 = !set_storage_key((void *)0, 0x98);
	GUEST_SYNC(STAGE_INIT_FETCH_PROT_OVERRIDE);
	GUEST_SYNC(perform_next_stage(&i, mapped_0));

	/* Storage-protection override */
	GUEST_SYNC(perform_next_stage(&i, mapped_0));
}

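/*
 * Run the vcpu until the next ucall; report a guest assertion failure via
 * TEST_FAIL, otherwise check that the guest reached the expected sync stage.
 */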
#define HOST_SYNC(vmp, stage)							\
({										\
	struct kvm_vm *__vm = (vmp);						\
	struct ucall uc;							\
	int __stage = (stage);							\
										\
	vcpu_run(__vm, VCPU_ID);						\
	get_ucall(__vm, VCPU_ID, &uc);						\
	if (uc.cmd == UCALL_ABORT) {						\
		TEST_FAIL("line %lu: %s, hints: %lu, %lu", uc.args[1],		\
			  (const char *)uc.args[0], uc.args[2], uc.args[3]);	\
	}									\
	ASSERT_EQ(uc.cmd, UCALL_SYNC);						\
	ASSERT_EQ(uc.args[1], __stage);						\
})

int main(int argc, char *argv[])
{
	struct kvm_vm *vm;
	struct kvm_run *run;
	vm_vaddr_t guest_0_page;

	vm = vm_create_default(VCPU_ID, 0, guest_code);
	run = vcpu_state(vm, VCPU_ID);

	HOST_SYNC(vm, STAGE_INIT_SIMPLE);
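	/* Write protect both test pages in the host so that KVM emulates TPROT */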
	mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);
	HOST_SYNC(vm, TEST_SIMPLE);

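	/*
	 * Try to allocate guest page 0; the allocator may hand out a different
	 * page, in which case the affected override tests are skipped.
	 */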
	guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
	if (guest_0_page != 0)
		print_skip("Did not allocate page at 0 for fetch protection override tests");
	HOST_SYNC(vm, STAGE_INIT_FETCH_PROT_OVERRIDE);
	if (guest_0_page == 0)
		mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
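	/* Enable fetch protection override; KVM loads the updated CR0 on the next run */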
	run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(vm, TEST_FETCH_PROT_OVERRIDE);

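	/* Additionally enable storage protection override for the final tests */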
	run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(vm, TEST_STORAGE_PROT_OVERRIDE);
}