/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/insn.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/mpx.h>

static const char *mpx_mapping_name(struct vm_area_struct *vma)
{
	return "[mpx]";
}

static struct vm_operations_struct mpx_vma_ops = {
	.name = mpx_mapping_name,
};

static int is_mpx_vma(struct vm_area_struct *vma)
{
	return (vma->vm_ops == &mpx_vma_ops);
}

/*
 * This is really a simplified "vm_mmap".  It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 *
 * Later on, we use the vma->vm_ops to uniquely identify these
 * VMAs.
 */
static unsigned long mpx_mmap(unsigned long len)
{
	unsigned long ret;
	unsigned long addr, pgoff;
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;
	struct vm_area_struct *vma;

	/* Only a bounds table can be allocated here */
	if (len != MPX_BT_SIZE_BYTES)
		return -EINVAL;

	down_write(&mm->mmap_sem);

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Obtain the address to map to.  We verify (or select) it and
	 * ensure that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(NULL, 0, len, 0, MAP_ANONYMOUS | MAP_PRIVATE);
	if (addr & ~PAGE_MASK) {
		ret = addr;
		goto out;
	}


	vm_flags = VM_READ | VM_WRITE | VM_MPX |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Set pgoff according to addr for anon_vma */
	pgoff = addr >> PAGE_SHIFT;

	ret = mmap_region(NULL, addr, len, vm_flags, pgoff);
	if (IS_ERR_VALUE(ret))
		goto out;

	vma = find_vma(mm, ret);
	if (!vma) {
		ret = -ENOMEM;
		goto out;
	}
	vma->vm_ops = &mpx_vma_ops;

	if (vm_flags & VM_LOCKED) {
		up_write(&mm->mmap_sem);
		mm_populate(ret, len);
		return ret;
	}

out:
	up_write(&mm->mmap_sem);
	return ret;
}

enum reg_type {
	REG_TYPE_RM = 0,
	REG_TYPE_INDEX,
	REG_TYPE_BASE,
};

static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
			  enum reg_type type)
{
	int regno = 0;

	static const int regoff[] = {
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, bx),
		offsetof(struct pt_regs, sp),
		offsetof(struct pt_regs, bp),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
#endif
	};
	int nr_registers = ARRAY_SIZE(regoff);
	/*
	 * Don't decode a 32-bit instruction as
	 * reading a 64-bit-only register.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
		nr_registers -= 8;

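	/*
	 * Decoding sketch (based on the x86 SDM encoding rules, not on
	 * anything else in this file): the REX.B/REX.X bits checked
	 * below act as a 4th register-number bit.  E.g. ModRM.rm == 0
	 * normally selects ax, but with REX.B set it selects r8, whose
	 * offset lives in the second half of regoff[].
	 */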
	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
		if (X86_REX_X(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	default:
		pr_err("invalid register type");
		BUG();
		break;
	}

	if (regno >= nr_registers) {
		WARN_ONCE(1, "decoded an instruction with an invalid register");
		return -EINVAL;
	}
	return regoff[regno];
}

/*
 * Return the address being referenced by the instruction.
 * For rm==3, return the content of the rm register.
 * For rm!=3, calculate the address using the SIB byte and
 * the displacement.
 */
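/*
 * A worked example (a sketch, with assumed register values): for a
 * memory operand like 0x10(%rax,%rbx,4), the decoder yields base=rax,
 * index=rbx, scale=4 and disp=0x10, so the code below ends up
 * computing:
 *
 *	addr = regs->ax + regs->bx * (1 << 2) + 0x10;
 */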
static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
{
	unsigned long addr, base, indx;
	int addr_offset, base_offset, indx_offset;
	insn_byte_t sib;

	insn_get_modrm(insn);
	insn_get_sib(insn);
	sib = insn->sib.value;

	if (X86_MODRM_MOD(insn->modrm.value) == 3) {
		addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
		if (addr_offset < 0)
			goto out_err;
		addr = regs_get_register(regs, addr_offset);
	} else {
		if (insn->sib.nbytes) {
			base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
			if (base_offset < 0)
				goto out_err;

			indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
			if (indx_offset < 0)
				goto out_err;

			base = regs_get_register(regs, base_offset);
			indx = regs_get_register(regs, indx_offset);
			addr = base + indx * (1 << X86_SIB_SCALE(sib));
		} else {
			addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
			if (addr_offset < 0)
				goto out_err;
			addr = regs_get_register(regs, addr_offset);
		}
		addr += insn->displacement.value;
	}
	return (void __user *)addr;
out_err:
	return (void __user *)-1;
}

static int mpx_insn_decode(struct insn *insn,
			   struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int x86_64 = !test_thread_flag(TIF_IA32);
	int not_copied;
	int nr_copied;

	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
	nr_copied = sizeof(buf) - not_copied;
	/*
	 * The decoder _should_ fail nicely if we pass it a short buffer.
	 * But, let's not depend on that implementation detail.  If we
	 * did not get anything, just error out now.
	 */
	if (!nr_copied)
		return -EFAULT;
	insn_init(insn, buf, nr_copied, x86_64);
	insn_get_length(insn);
	/*
	 * copy_from_user() tries to get as many bytes as we could see in
	 * the largest possible instruction.  If the instruction we are
	 * after is shorter than that _and_ we attempt to copy from
	 * something unreadable, we might get a short read.  This is OK
	 * as long as the read did not stop in the middle of the
	 * instruction.  Check to see if we got a partial instruction.
	 */
	if (nr_copied < insn->length)
		return -EFAULT;

	insn_get_opcode(insn);
	/*
	 * We only _really_ need to decode bndcl/bndcn/bndcu.
	 * Error out on anything else.
	 */
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;
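	/*
	 * (Why the two-byte check above suffices, per the MPX spec: all
	 * MPX instructions -- bndcl/bndcu/bndcn included -- share the
	 * 0x0f 0x1a and 0x0f 0x1b opcode bytes and are distinguished
	 * only by their mandatory prefixes.)
	 */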

	return 0;
bad_opcode:
	return -EINVAL;
}

/*
 * If a bounds overflow occurs then a #BR is generated.  This
 * function decodes MPX instructions to get the violation address
 * and sets this address in the extended struct siginfo.
 *
 * Note that this is not a super precise way of doing this.
 * Userspace could have, by the time we get here, written
 * anything it wants into the instructions.  We cannot
 * trust anything about it.  They might not be valid
 * instructions or might encode invalid registers, etc...
 *
 * The caller is expected to kfree() the returned siginfo_t.
 */
siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
{
	const struct bndreg *bndregs, *bndreg;
	siginfo_t *info = NULL;
	struct insn insn;
	uint8_t bndregno;
	int err;

	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;

	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* Get the bndregs field from the current task's xsave area */
	bndregs = get_xsave_field_ptr(XSTATE_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* Now go select the individual register in the set of 4 */
	bndreg = &bndregs[bndregno];

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_out;
	}
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode.  Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
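	/*
	 * A worked example (a sketch with made-up values): for bounds
	 * of [0x1000, 0x1fff], the register holds lower_bound == 0x1000
	 * and upper_bound == ~0x1fff, so the inversion below recovers
	 * si_upper == 0x1fff.
	 */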
	info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
	info->si_addr_lsb = 0;
	info->si_signo = SIGSEGV;
	info->si_errno = 0;
	info->si_code = SEGV_BNDERR;
	info->si_addr = mpx_get_addr_ref(&insn, regs);
	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->si_addr == (void *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	return info;
err_out:
	/* info might be NULL, but kfree() handles that */
	kfree(info);
	return ERR_PTR(err);
}

static __user void *mpx_get_bounds_dir(void)
{
	const struct bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * 32-bit binaries on 64-bit kernels are currently
	 * unsupported.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
		return MPX_INVALID_BOUNDS_DIR;
	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;

	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}

int mpx_enable_management(void)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = current->mm;
	int ret = 0;

	/*
	 * The runtime in userspace is responsible for allocating the
	 * bounds directory.  It then saves the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enables MPX
	 * through the XRSTOR instruction.
	 *
	 * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
	 * expected to be relatively expensive.  Storing the bounds
	 * directory here means that we do not have to do xsave in the
	 * unmap path; we can just use mm->bd_addr instead.
	 */
	bd_base = mpx_get_bounds_dir();
	down_write(&mm->mmap_sem);
	mm->bd_addr = bd_base;
	if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;

	up_write(&mm->mmap_sem);
	return ret;
}

int mpx_disable_management(void)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}

/*
 * In 32-bit mode the bounds directory (MPX_BD_SIZE_BYTES) is 4MB
 * and each bounds table (MPX_BT_SIZE_BYTES) is 16KB.  In 64-bit
 * mode the directory is 2GB and each bounds table is 4MB.
 */
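/*
 * Where those sizes come from (a sketch of the arithmetic; the exact
 * constants live in asm/mpx.h): a 64-bit bounds directory has 2^28
 * entries of 8 bytes (2GB) and each bounds table has 2^17 entries of
 * 32 bytes (4MB); a 32-bit directory has 2^20 entries of 4 bytes
 * (4MB) and each table has 2^10 entries of 16 bytes (16KB).
 */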
static int allocate_bt(long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	int ret = 0;

	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(MPX_BT_SIZE_BYTES);
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bt_addr = bt_addr | MPX_BD_ENTRY_VALID_FLAG;

	/*
	 * Go poke the address of the new bounds table in to the
	 * bounds directory entry out in userspace memory.  Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
					   expected_old_val, bt_addr);
	if (ret)
		goto out_unmap;


	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails.  Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry.  Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set.  Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to the bounds directory.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	return 0;
out_unmap:
	vm_munmap(bt_addr & MPX_BT_ADDR_MASK, MPX_BT_SIZE_BYTES);
	return ret;
}

/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory.  If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * In 32-bit mode the bounds directory is 4MB and each bounds
 * table is 16KB.  In 64-bit mode the bounds directory is 2GB
 * and each bounds table is 4MB.
 */
static int do_mpx_bt_fault(void)
{
	unsigned long bd_entry, bd_base;
	const struct bndcsr *bndcsr;

	bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + MPX_BD_SIZE_BYTES))
		return -EINVAL;

	return allocate_bt((long __user *)bd_entry);
}

int mpx_handle_bd_fault(void)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;

	if (do_mpx_bt_fault()) {
		force_sig(SIGSEGV, current);
		/*
		 * The force_sig() is essentially "handling" this
		 * exception, so we do not pass up the error
		 * from do_mpx_bt_fault().
		 */
	}
	return 0;
}

/*
 * A thin wrapper around get_user_pages().  Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;
	int force = 0;

	gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
				 nr_pages, write, force, NULL, NULL);
	/*
	 * get_user_pages() returns the number of pages it got.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* Must have gup'd a page (gup_ret > 0): success */
	return 0;
}

/*
 * Get the base of the bounds table pointed to by a specific
 * bounds directory entry.
 */
static int get_bt_addr(struct mm_struct *mm,
		long __user *bd_entry, unsigned long *bt_addr)
{
	int ret;
	int valid_bit;

	if (!access_ok(VERIFY_READ, (bd_entry), sizeof(*bd_entry)))
		return -EFAULT;

	while (1) {
		int need_write = 0;

		pagefault_disable();
		ret = get_user(*bt_addr, bd_entry);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = *bt_addr & MPX_BD_ENTRY_VALID_FLAG;
	*bt_addr &= MPX_BT_ADDR_MASK;

	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty.  If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong.  This
	 * -EINVAL return will cause a SIGSEGV.
	 */
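	/*
	 * A sketch of the three cases with made-up values: an entry of
	 * 0x00007f0000200001 has the valid bit set and masks down to a
	 * table at 0x00007f0000200000; an entry of 0x0 means "no table"
	 * (-ENOENT below); an entry of 0x00007f0000200000 -- data but
	 * no valid bit -- is the corrupt case rejected with -EINVAL.
	 */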
	if (!valid_bit && *bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bt entry?  That is OK.  It
	 * just means there was no bounds table for this memory.  Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!valid_bit)
		return -ENOENT;

	return 0;
}

/*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
static int zap_bt_entries(struct mm_struct *mm,
		unsigned long bt_addr,
		unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;
	unsigned long addr, len;

	/*
	 * Find the first overlapping vma.  If vma->vm_start > start, there
	 * will be a hole in the bounds table.  This -EINVAL return will
	 * cause a SIGSEGV.
	 */
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EINVAL;

	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split.  So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here.  If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error.  This
		 * probably results in a SIGSEGV.
		 */
		if (!is_mpx_vma(vma))
			return -EINVAL;

		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len, NULL);

		vma = vma->vm_next;
		/* Don't dereference vma if the list just ended. */
		if (vma)
			addr = vma->vm_start;
	}

	return 0;
}

static int unmap_single_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long actual_old_val = 0;
	int ret;

	while (1) {
		int need_write = 1;

		pagefault_disable();
		ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
						   expected_old_val, 0);
		pagefault_enable();
		if (!ret)
			break;
		if (ret == -EFAULT)
			ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * There was no bounds table pointed to by the
		 * directory, so declare success.  Somebody freed
		 * it.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry.  We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated.  Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}

	/*
	 * Note, we are likely being called under do_munmap() already.  To
	 * avoid recursion, do_munmap() will check whether it comes
	 * from a bounds table via the VM_MPX flag.
	 */
	return do_munmap(mm, bt_addr, MPX_BT_SIZE_BYTES);
}

/*
 * If the bounds table pointed to by bounds directory 'bd_entry' is
 * not shared, unmap the whole bounds table.  Otherwise, free only
 * the backing physical pages of the bounds table entries covered
 * by the virtual address region start...end.
 */
static int unmap_shared_bt(struct mm_struct *mm,
		long __user *bd_entry, unsigned long start,
		unsigned long end, bool prev_shared, bool next_shared)
{
	unsigned long bt_addr;
	int ret;

	ret = get_bt_addr(mm, bd_entry, &bt_addr);
	/*
	 * We could see an "error" ret for not-present bounds
	 * tables (not really an error), or actual errors, but
	 * stop unmapping either way.
	 */
	if (ret)
		return ret;

	if (prev_shared && next_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
	else if (prev_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
				bt_addr+MPX_BT_SIZE_BYTES);
	else if (next_shared)
		ret = zap_bt_entries(mm, bt_addr, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
	else
		ret = unmap_single_bt(mm, bd_entry, bt_addr);

	return ret;
}

/*
 * A virtual address region being munmap()ed might share a bounds
 * table with adjacent VMAs.  We only need to free the backing
 * physical memory of the shared bounds table entries covered by
 * this virtual address region.
 */
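/*
 * (Scale sketch, assuming the 64-bit layout: one bounds table holds
 * the bounds for 2^17 pointer slots of 8 bytes each, i.e. 1MB of
 * virtual address space, so an munmap() whose start or end falls
 * inside such a 1MB region can share its edge table with a
 * neighboring VMA.)
 */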
static int unmap_edge_bts(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	int ret;
	long __user *bde_start, *bde_end;
	struct vm_area_struct *prev, *next;
	bool prev_shared = false, next_shared = false;

	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);

	/*
	 * Check whether bde_start and bde_end are shared with adjacent
	 * VMAs.
	 *
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole.  This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	if (prev && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(prev->vm_end-1))
			== bde_start)
		prev_shared = true;
	if (next && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(next->vm_start))
			== bde_end)
		next_shared = true;

	/*
	 * If the virtual address region being munmap()ed is covered
	 * by just one bounds table and that table is also shared with
	 * adjacent VMAs, only part of the backing physical memory of
	 * the bounds table needs to be freed.  Otherwise the whole
	 * bounds table needs to be unmapped.
	 */
	if (bde_start == bde_end) {
		return unmap_shared_bt(mm, bde_start, start, end,
				prev_shared, next_shared);
	}

	/*
	 * If more than one bounds table is covered by the virtual
	 * address region being munmap()ed, we need to separately check
	 * whether bde_start and bde_end are shared with adjacent VMAs.
	 */
	ret = unmap_shared_bt(mm, bde_start, start, end, prev_shared, false);
	if (ret)
		return ret;
	ret = unmap_shared_bt(mm, bde_end, start, end, false, next_shared);
	if (ret)
		return ret;

	return 0;
}

static int mpx_unmap_tables(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	int ret;
	long __user *bd_entry, *bde_start, *bde_end;
	unsigned long bt_addr;

	/*
	 * "Edge" bounds tables are those which are being used by the region
	 * (start -> end), but that may be shared with adjacent areas.  If they
	 * turn out to be completely unshared, they will be freed.  If they are
	 * shared, we will free the backing store (like an MADV_DONTNEED) for
	 * areas used by this region.
	 */
	ret = unmap_edge_bts(mm, start, end);
	switch (ret) {
		/* non-present tables are OK */
		case 0:
		case -ENOENT:
			/* Success, or no tables to unmap */
			break;
		case -EINVAL:
		case -EFAULT:
		default:
			return ret;
	}

	/*
	 * Only unmap the bounds tables that are
	 *   1. fully covered
	 *   2. not at the edges of the mapping, even if fully aligned
	 */
	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
	for (bd_entry = bde_start + 1; bd_entry < bde_end; bd_entry++) {
		ret = get_bt_addr(mm, bd_entry, &bt_addr);
		switch (ret) {
			case 0:
				break;
			case -ENOENT:
				/* No table here, try the next one */
				continue;
			case -EINVAL:
			case -EFAULT:
			default:
				/*
				 * Note: we are being strict here.
				 * Any time we run in to an issue
				 * unmapping tables, we stop and
				 * SIGSEGV.
				 */
				return ret;
		}

		ret = unmap_single_bt(mm, bd_entry, bt_addr);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Free unused bounds tables covered in a virtual address region being
 * munmap()ed.  Assume end > start.
 *
 * This function will be called by do_munmap(), and the VMAs covering
 * the virtual address region start...end have already been split if
 * necessary, and the 'vma' is the first vma in this range (start -> end).
 */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int ret;

	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * This will look across the entire 'start -> end' range,
	 * and find all of the non-VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX vma is found in the range
	 * (start->end), we will not continue follow-up work.  This
	 * recursion represents having bounds tables for bounds tables,
	 * which should not occur normally.  Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);

	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}