/*
 * mpx.c - Memory Protection eXtensions
 *
 * Copyright (c) 2014, Intel Corporation.
 * Qiaowei Ren <qiaowei.ren@intel.com>
 * Dave Hansen <dave.hansen@intel.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/sched/sysctl.h>

#include <asm/i387.h>
#include <asm/insn.h>
#include <asm/mman.h>
#include <asm/mmu_context.h>
#include <asm/mpx.h>
#include <asm/processor.h>
#include <asm/fpu-internal.h>
/*
 * This is really a simplified "vm_mmap". It only handles MPX
 * bounds tables (the bounds directory is user-allocated).
 */
static unsigned long mpx_mmap(unsigned long len)
{
	unsigned long ret;
	unsigned long addr, pgoff;
	struct mm_struct *mm = current->mm;
	vm_flags_t vm_flags;
	struct vm_area_struct *vma;
	/* Only bounds table and bounds directory can be allocated here */
	if (len != MPX_BD_SIZE_BYTES && len != MPX_BT_SIZE_BYTES)
		return -EINVAL;

	down_write(&mm->mmap_sem);

	/* Too many mappings? */
	if (mm->map_count > sysctl_max_map_count) {
		ret = -ENOMEM;
		goto out;
	}
	/*
	 * Obtain the address to map to. We verify (or select) it and ensure
	 * that it represents a valid section of the address space.
	 */
	addr = get_unmapped_area(NULL, 0, len, 0, MAP_ANONYMOUS | MAP_PRIVATE);
	if (addr & ~PAGE_MASK) {
		ret = addr;
		goto out;
	}
	vm_flags = VM_READ | VM_WRITE | VM_MPX |
			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

	/* Set pgoff according to addr for anon_vma */
	pgoff = addr >> PAGE_SHIFT;

	ret = mmap_region(NULL, addr, len, vm_flags, pgoff);
	if (IS_ERR_VALUE(ret))
		goto out;

	vma = find_vma(mm, ret);
	if (!vma) {
		ret = -ENOMEM;
		goto out;
	}
	if (vm_flags & VM_LOCKED) {
		up_write(&mm->mmap_sem);
		mm_populate(ret, len);
		return ret;
	}

out:
	up_write(&mm->mmap_sem);
	return ret;
}
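/*
 * Note: the VM_MPX flag set in mpx_mmap() above is what lets the rest
 * of this file (e.g. zap_bt_entries() and the do_munmap() recursion
 * check) tell kernel-managed bounds-table VMAs apart from ordinary
 * user mappings.
 */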
enum reg_type {
	REG_TYPE_RM = 0,
	REG_TYPE_INDEX,
	REG_TYPE_BASE,
};

static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
			  enum reg_type type)
{
	int regno = 0;
	static const int regoff[] = {
		offsetof(struct pt_regs, ax),
		offsetof(struct pt_regs, cx),
		offsetof(struct pt_regs, dx),
		offsetof(struct pt_regs, bx),
		offsetof(struct pt_regs, sp),
		offsetof(struct pt_regs, bp),
		offsetof(struct pt_regs, si),
		offsetof(struct pt_regs, di),
#ifdef CONFIG_X86_64
		offsetof(struct pt_regs, r8),
		offsetof(struct pt_regs, r9),
		offsetof(struct pt_regs, r10),
		offsetof(struct pt_regs, r11),
		offsetof(struct pt_regs, r12),
		offsetof(struct pt_regs, r13),
		offsetof(struct pt_regs, r14),
		offsetof(struct pt_regs, r15),
#endif
	};
	int nr_registers = ARRAY_SIZE(regoff);
	/*
	 * Don't possibly decode a 32-bit instruction as
	 * reading a 64-bit-only register.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && !insn->x86_64)
		nr_registers -= 8;
	switch (type) {
	case REG_TYPE_RM:
		regno = X86_MODRM_RM(insn->modrm.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_INDEX:
		regno = X86_SIB_INDEX(insn->sib.value);
		if (X86_REX_X(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	case REG_TYPE_BASE:
		regno = X86_SIB_BASE(insn->sib.value);
		if (X86_REX_B(insn->rex_prefix.value) == 1)
			regno += 8;
		break;

	default:
		pr_err("invalid register type");
		BUG();
		break;
	}
	if (regno >= nr_registers) {
		WARN_ONCE(1, "decoded an instruction with an invalid register");
		return -EINVAL;
	}
	return regoff[regno];
}
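/*
 * Worked example (illustrative, not from the original source): for an
 * instruction like "bndcl (%r11), %bnd0", ModRM.rm is 3 (0b011) and
 * REX.B is set, so regno becomes 3 + 8 = 11 and we return
 * offsetof(struct pt_regs, r11).
 */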
/*
 * Return the address being referenced by the instruction.
 * For rm=3 returning the content of the rm reg.
 * For rm!=3 calculates the address using SIB and Disp.
 */
static void __user *mpx_get_addr_ref(struct insn *insn, struct pt_regs *regs)
{
	unsigned long addr, base, indx;
	int addr_offset, base_offset, indx_offset;
	insn_byte_t sib;

	insn_get_modrm(insn);
	insn_get_sib(insn);
	sib = insn->sib.value;
	if (X86_MODRM_MOD(insn->modrm.value) == 3) {
		addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
		if (addr_offset < 0)
			goto out_err;
		addr = regs_get_register(regs, addr_offset);
	} else {
		if (insn->sib.nbytes) {
			base_offset = get_reg_offset(insn, regs, REG_TYPE_BASE);
			if (base_offset < 0)
				goto out_err;

			indx_offset = get_reg_offset(insn, regs, REG_TYPE_INDEX);
			if (indx_offset < 0)
				goto out_err;

			base = regs_get_register(regs, base_offset);
			indx = regs_get_register(regs, indx_offset);
			addr = base + indx * (1 << X86_SIB_SCALE(sib));
		} else {
			addr_offset = get_reg_offset(insn, regs, REG_TYPE_RM);
			if (addr_offset < 0)
				goto out_err;
			addr = regs_get_register(regs, addr_offset);
		}
		addr += insn->displacement.value;
	}
	return (void __user *)addr;
out_err:
	return (void __user *)-1;
}
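/*
 * Worked example (illustrative): for a memory operand such as
 * 0x10(%rax,%rcx,4), the SIB path above computes
 * addr = regs->ax + regs->cx * (1 << 2) + 0x10.
 */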
static int mpx_insn_decode(struct insn *insn,
			   struct pt_regs *regs)
{
	unsigned char buf[MAX_INSN_SIZE];
	int x86_64 = !test_thread_flag(TIF_IA32);
	int not_copied;
	int nr_copied;

	not_copied = copy_from_user(buf, (void __user *)regs->ip, sizeof(buf));
	nr_copied = sizeof(buf) - not_copied;
	/*
	 * The decoder _should_ fail nicely if we pass it a short buffer.
	 * But, let's not depend on that implementation detail. If we
	 * did not get anything, just error out now.
	 */
	if (!nr_copied)
		return -EFAULT;
	insn_init(insn, buf, nr_copied, x86_64);
	insn_get_length(insn);
	/*
	 * copy_from_user() tries to get as many bytes as we could see in
	 * the largest possible instruction. If the instruction we are
	 * after is shorter than that _and_ we attempt to copy from
	 * something unreadable, we might get a short read. This is OK
	 * as long as the read did not stop in the middle of the
	 * instruction. Check to see if we got a partial instruction.
	 */
	if (nr_copied < insn->length)
		return -EFAULT;
	insn_get_opcode(insn);
	/*
	 * We only _really_ need to decode bndcl/bndcn/bndcu.
	 * Error out on anything else.
	 */
	if (insn->opcode.bytes[0] != 0x0f)
		goto bad_opcode;
	if ((insn->opcode.bytes[1] != 0x1a) &&
	    (insn->opcode.bytes[1] != 0x1b))
		goto bad_opcode;

	return 0;
bad_opcode:
	return -EINVAL;
}
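/*
 * The 0x0f 0x1a/0x1b opcode bytes are shared by the whole MPX family;
 * the mandatory prefix (0xf3 for bndcl, 0xf2 for bndcu/bndcn) selects
 * the individual instruction, so the check above is intentionally no
 * finer than the two opcode bytes.
 */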
/*
 * If a bounds overflow occurs then a #BR is generated. This
 * function decodes MPX instructions to get the violation address
 * and store it in the extended struct siginfo.
 *
 * Note that this is not a super precise way of doing this.
 * Userspace could have, by the time we get here, written
 * anything it wants in to the instructions. We cannot
 * trust anything about it. They might not be valid
 * instructions or might encode invalid registers, etc...
 *
 * The caller is expected to kfree() the returned siginfo_t.
 */
siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
				struct xsave_struct *xsave_buf)
{
	struct bndreg *bndregs, *bndreg;
	siginfo_t *info = NULL;
	struct insn insn;
	uint8_t bndregno;
	int err;

	err = mpx_insn_decode(&insn, regs);
	if (err)
		goto err_out;
	/*
	 * We know at this point that we are only dealing with
	 * MPX instructions.
	 */
	insn_get_modrm(&insn);
	bndregno = X86_MODRM_REG(insn.modrm.value);
	if (bndregno > 3) {
		err = -EINVAL;
		goto err_out;
	}
	/* get the bndregs _area_ of the xsave structure */
	bndregs = get_xsave_addr(xsave_buf, XSTATE_BNDREGS);
	if (!bndregs) {
		err = -EINVAL;
		goto err_out;
	}
	/* now go select the individual register in the set of 4 */
	bndreg = &bndregs[bndregno];
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto err_out;
	}
	/*
	 * The registers are always 64-bit, but the upper 32
	 * bits are ignored in 32-bit mode. Also, note that the
	 * upper bounds are architecturally represented in 1's
	 * complement form.
	 *
	 * The 'unsigned long' cast is because the compiler
	 * complains when casting from integers to different-size
	 * pointers.
	 */
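	/*
	 * For example, an upper bound of 0x1000 is architecturally
	 * held in the bound register as ~0x1000, which is why the
	 * value is inverted before being reported in si_upper below.
	 */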
	info->si_lower = (void __user *)(unsigned long)bndreg->lower_bound;
	info->si_upper = (void __user *)(unsigned long)~bndreg->upper_bound;
	info->si_addr_lsb = 0;
	info->si_signo = SIGSEGV;
	info->si_errno = 0;
	info->si_code = SEGV_BNDERR;
	info->si_addr = mpx_get_addr_ref(&insn, regs);
	/*
	 * We were not able to extract an address from the instruction,
	 * probably because there was something invalid in it.
	 */
	if (info->si_addr == (void *)-1) {
		err = -EINVAL;
		goto err_out;
	}
	return info;
err_out:
	/* info might be NULL, but kfree() handles that */
	kfree(info);
	return ERR_PTR(err);
}
static __user void *task_get_bounds_dir(struct task_struct *tsk)
{
	struct bndcsr *bndcsr;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return MPX_INVALID_BOUNDS_DIR;
	/*
	 * 32-bit binaries on 64-bit kernels are currently
	 * unsupported.
	 */
	if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
		return MPX_INVALID_BOUNDS_DIR;
	/*
	 * The bounds directory pointer is stored in a register
	 * only accessible if we first do an xsave.
	 */
	fpu_save_init(&tsk->thread.fpu);
	bndcsr = get_xsave_addr(&tsk->thread.fpu.state->xsave, XSTATE_BNDCSR);
	if (!bndcsr)
		return MPX_INVALID_BOUNDS_DIR;
	/*
	 * Make sure the register looks valid by checking the
	 * enable bit.
	 */
	if (!(bndcsr->bndcfgu & MPX_BNDCFG_ENABLE_FLAG))
		return MPX_INVALID_BOUNDS_DIR;
	/*
	 * Lastly, mask off the low bits used for configuration
	 * flags, and return the address of the bounds directory.
	 */
	return (void __user *)(unsigned long)
		(bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
}
int mpx_enable_management(struct task_struct *tsk)
{
	void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
	struct mm_struct *mm = tsk->mm;
	int ret = 0;

	/*
	 * The userspace runtime is responsible for allocating the
	 * bounds directory. It then saves the base of the bounds
	 * directory into the XSAVE/XRSTOR save area and enables MPX
	 * through the XRSTOR instruction.
	 *
	 * fpu_xsave() is expected to be very expensive. Storing the bounds
	 * directory here means that we do not have to do xsave in the unmap
	 * path; we can just use mm->bd_addr instead.
	 */
	bd_base = task_get_bounds_dir(tsk);
	down_write(&mm->mmap_sem);
	mm->bd_addr = bd_base;
	if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
		ret = -ENXIO;

	up_write(&mm->mmap_sem);
	return ret;
}
int mpx_disable_management(struct task_struct *tsk)
{
	struct mm_struct *mm = current->mm;

	if (!cpu_feature_enabled(X86_FEATURE_MPX))
		return -ENXIO;

	down_write(&mm->mmap_sem);
	mm->bd_addr = MPX_INVALID_BOUNDS_DIR;
	up_write(&mm->mmap_sem);
	return 0;
}
/*
 * With 32-bit mode, MPX_BD_SIZE_BYTES is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, MPX_BD_SIZE_BYTES is 2GB,
 * and the size of each bounds table is 4MB.
 */
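/*
 * Those sizes follow from the layout (the exact shift constants live
 * in asm/mpx.h): in 64-bit mode the directory holds 2^28 8-byte
 * entries (2GB) and each table holds 2^17 32-byte entries (4MB); in
 * 32-bit mode the directory holds 2^20 4-byte entries (4MB) and each
 * table holds 2^10 16-byte entries (16KB).
 */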
static int allocate_bt(long __user *bd_entry)
{
	unsigned long expected_old_val = 0;
	unsigned long actual_old_val = 0;
	unsigned long bt_addr;
	int ret = 0;
	/*
	 * Carve the virtual space out of userspace for the new
	 * bounds table:
	 */
	bt_addr = mpx_mmap(MPX_BT_SIZE_BYTES);
	if (IS_ERR((void *)bt_addr))
		return PTR_ERR((void *)bt_addr);
	/*
	 * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
	 */
	bt_addr = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	/*
	 * Go poke the address of the new bounds table in to the
	 * bounds directory entry out in userspace memory. Note:
	 * we may race with another CPU instantiating the same table.
	 * In that case the cmpxchg will see an unexpected
	 * 'actual_old_val'.
	 *
	 * This can fault, but that's OK because we do not hold
	 * mmap_sem at this point, unlike some of the other parts
	 * of the MPX code that have to pagefault_disable().
	 */
	ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
					   expected_old_val, bt_addr);
	if (ret)
		goto out_unmap;
	/*
	 * The user_atomic_cmpxchg_inatomic() will only return nonzero
	 * for faults, *not* if the cmpxchg itself fails. Now we must
	 * verify that the cmpxchg itself completed successfully.
	 */
	/*
	 * We expected an empty 'expected_old_val', but instead found
	 * an apparently valid entry. Assume we raced with another
	 * thread to instantiate this table and declare success.
	 */
	if (actual_old_val & MPX_BD_ENTRY_VALID_FLAG) {
		ret = 0;
		goto out_unmap;
	}
	/*
	 * We found a non-empty bd_entry but it did not have the
	 * VALID_FLAG set. Return an error which will result in
	 * a SEGV since this probably means that somebody scribbled
	 * some invalid data in to the bounds directory entry.
	 */
	if (expected_old_val != actual_old_val) {
		ret = -EINVAL;
		goto out_unmap;
	}
	return 0;
out_unmap:
	vm_munmap(bt_addr & MPX_BT_ADDR_MASK, MPX_BT_SIZE_BYTES);
	return ret;
}
/*
 * When a BNDSTX instruction attempts to save bounds to a bounds
 * table, it will first attempt to look up the table in the
 * first-level bounds directory. If it does not find a table in
 * the directory, a #BR is generated and we get here in order to
 * allocate a new table.
 *
 * With 32-bit mode, the size of BD is 4MB, and the size of each
 * bounds table is 16KB. With 64-bit mode, the size of BD is 2GB,
 * and the size of each bounds table is 4MB.
 */
static int do_mpx_bt_fault(struct xsave_struct *xsave_buf)
{
	unsigned long bd_entry, bd_base;
	struct bndcsr *bndcsr;

	bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
	if (!bndcsr)
		return -EINVAL;
	/*
	 * Mask off the preserve and enable bits
	 */
	bd_base = bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK;
	/*
	 * The hardware provides the address of the missing or invalid
	 * entry via BNDSTATUS, so we don't have to go look it up.
	 */
	bd_entry = bndcsr->bndstatus & MPX_BNDSTA_ADDR_MASK;
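	/*
	 * The low bits of BNDSTATUS hold the #BR error code rather
	 * than address bits, which is why they are masked off here.
	 */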
	/*
	 * Make sure the directory entry is within where we think
	 * the directory is.
	 */
	if ((bd_entry < bd_base) ||
	    (bd_entry >= bd_base + MPX_BD_SIZE_BYTES))
		return -EINVAL;

	return allocate_bt((long __user *)bd_entry);
}
int mpx_handle_bd_fault(struct xsave_struct *xsave_buf)
{
	/*
	 * Userspace never asked us to manage the bounds tables,
	 * so refuse to help.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return -EINVAL;
	if (do_mpx_bt_fault(xsave_buf)) {
		force_sig(SIGSEGV, current);
		/*
		 * The force_sig() is essentially "handling" this
		 * exception, so we do not pass up the error
		 * from do_mpx_bt_fault().
		 */
	}
	return 0;
}
/*
 * A thin wrapper around get_user_pages(). Returns 0 if the
 * fault was resolved or -errno if not.
 */
static int mpx_resolve_fault(long __user *addr, int write)
{
	long gup_ret;
	int nr_pages = 1;
	int force = 0;

	gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
				 nr_pages, write, force, NULL, NULL);
	/*
	 * get_user_pages() returns number of pages gotten.
	 * 0 means we failed to fault in and get anything,
	 * probably because 'addr' is bad.
	 */
	if (!gup_ret)
		return -EFAULT;
	/* Other error, return it */
	if (gup_ret < 0)
		return gup_ret;
	/* must have gup'd a page and gup_ret > 0, success */
	return 0;
}
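/*
 * Callers use this in a fault-retry pattern: attempt the user access
 * with page faults disabled, call mpx_resolve_fault() to fault the
 * page in, then retry the access (see get_bt_addr() and
 * unmap_single_bt() below).
 */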
/*
 * Get the base of the bounds table pointed to by a specific bounds
 * directory entry.
 */
static int get_bt_addr(struct mm_struct *mm,
		       long __user *bd_entry, unsigned long *bt_addr)
{
	int ret;
	int valid_bit;
	if (!access_ok(VERIFY_READ, (bd_entry), sizeof(*bd_entry)))
		return -EFAULT;

	while (1) {
		int need_write = 0;

		pagefault_disable();
		ret = get_user(*bt_addr, bd_entry);
		pagefault_enable();
		if (!ret)
			break;

		ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}

	valid_bit = *bt_addr & MPX_BD_ENTRY_VALID_FLAG;
	*bt_addr &= MPX_BT_ADDR_MASK;
	/*
	 * When the kernel is managing bounds tables, a bounds directory
	 * entry will either have a valid address (plus the valid bit)
	 * *OR* be completely empty. If we see a !valid entry *and* some
	 * data in the address field, we know something is wrong. This
	 * -EINVAL return will cause a SIGSEGV.
	 */
	if (!valid_bit && *bt_addr)
		return -EINVAL;
	/*
	 * Do we have a completely zeroed bt entry? That is OK. It
	 * just means there was no bounds table for this memory. Make
	 * sure to distinguish this from -EINVAL, which will cause
	 * a SEGV.
	 */
	if (!valid_bit)
		return -ENOENT;

	return 0;
}
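/*
 * To illustrate the three cases above: an entry of
 * (table base | MPX_BD_ENTRY_VALID_FLAG) stores the table base in
 * *bt_addr and returns 0, an all-zero entry returns -ENOENT, and an
 * entry with address bits but no valid bit returns -EINVAL.
 */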
/*
 * Free the backing physical pages of bounds table 'bt_addr'.
 * Assume start...end is within that bounds table.
 */
static int zap_bt_entries(struct mm_struct *mm,
			  unsigned long bt_addr,
			  unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma;
	unsigned long addr, len;
	/*
	 * Find the first overlapping vma. If vma->vm_start > start, there
	 * will be a hole in the bounds table. This -EINVAL return will
	 * cause a SIGSEGV.
	 */
	vma = find_vma(mm, start);
	if (!vma || vma->vm_start > start)
		return -EINVAL;
	/*
	 * A NUMA policy on a VM_MPX VMA could cause this bounds table to
	 * be split. So we need to look across the entire 'start -> end'
	 * range of this bounds table, find all of the VM_MPX VMAs, and
	 * zap only those.
	 */
	addr = start;
	while (vma && vma->vm_start < end) {
		/*
		 * We followed a bounds directory entry down
		 * here. If we find a non-MPX VMA, that's bad,
		 * so stop immediately and return an error. This
		 * probably results in a SIGSEGV.
		 */
		if (!(vma->vm_flags & VM_MPX))
			return -EINVAL;
		len = min(vma->vm_end, end) - addr;
		zap_page_range(vma, addr, len, NULL);

		vma = vma->vm_next;
		if (vma)
			addr = vma->vm_start;
	}

	return 0;
}
static int unmap_single_bt(struct mm_struct *mm,
			   long __user *bd_entry, unsigned long bt_addr)
{
	unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
	unsigned long actual_old_val = 0;
	int ret;

	while (1) {
		int need_write = 1;

		pagefault_disable();
		ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
						   expected_old_val, 0);
		pagefault_enable();
		if (!ret)
			break;

		ret = mpx_resolve_fault(bd_entry, need_write);
		/*
		 * If we could not resolve the fault, consider it
		 * userspace's fault and error out.
		 */
		if (ret)
			return ret;
	}
	/*
	 * The cmpxchg was performed, check the results.
	 */
	if (actual_old_val != expected_old_val) {
		/*
		 * Someone else raced with us to unmap the table.
		 * There was no bounds table pointed to by the
		 * directory, so declare success. Somebody freed
		 * it.
		 */
		if (!actual_old_val)
			return 0;
		/*
		 * Something messed with the bounds directory
		 * entry. We hold mmap_sem for read or write
		 * here, so it could not be a _new_ bounds table
		 * that someone just allocated. Something is
		 * wrong, so pass up the error and SIGSEGV.
		 */
		return -EINVAL;
	}
	/*
	 * Note that we are likely already under do_munmap() here. To
	 * avoid recursion, do_munmap() uses the VM_MPX flag to
	 * recognize when it is unmapping a bounds table.
	 */
	return do_munmap(mm, bt_addr, MPX_BT_SIZE_BYTES);
}
/*
 * If the bounds table pointed to by bounds directory entry 'bd_entry'
 * is not shared, unmap the whole bounds table. Otherwise, only free
 * the backing physical pages of the bounds table entries covered by
 * this virtual address region start...end.
 */
static int unmap_shared_bt(struct mm_struct *mm,
			   long __user *bd_entry, unsigned long start,
			   unsigned long end, bool prev_shared, bool next_shared)
{
	unsigned long bt_addr;
	int ret;
	ret = get_bt_addr(mm, bd_entry, &bt_addr);
	/*
	 * We could see an "error" ret for not-present bounds
	 * tables (not really an error), or actual errors, but
	 * stop unmapping either way.
	 */
	if (ret)
		return ret;
	if (prev_shared && next_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
	else if (prev_shared)
		ret = zap_bt_entries(mm, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
				bt_addr+MPX_BT_SIZE_BYTES);
	else if (next_shared)
		ret = zap_bt_entries(mm, bt_addr, bt_addr,
				bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
	else
		ret = unmap_single_bt(mm, bd_entry, bt_addr);

	return ret;
}
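/*
 * In short, unmap_shared_bt() zaps only the start...end slice of the
 * table on any side that is shared, and tears down the whole table
 * (and its directory entry) only when neither side is shared.
 */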
/*
 * A virtual address region being munmap()ed might share bounds tables
 * with adjacent VMAs. We only need to free the backing physical memory
 * of the shared bounds-table entries covered by this virtual address
 * region.
 */
static int unmap_edge_bts(struct mm_struct *mm,
			  unsigned long start, unsigned long end)
{
	int ret;
	long __user *bde_start, *bde_end;
	struct vm_area_struct *prev, *next;
	bool prev_shared = false, next_shared = false;

	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
	/*
	 * Check whether bde_start and bde_end are shared with adjacent
	 * VMAs.
	 *
	 * We already unlinked the VMAs from the mm's rbtree so 'start'
	 * is guaranteed to be in a hole. This gets us the first VMA
	 * before the hole in to 'prev' and the next VMA after the hole
	 * in to 'next'.
	 */
	next = find_vma_prev(mm, start, &prev);
	if (prev && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(prev->vm_end-1))
			== bde_start)
		prev_shared = true;
	if (next && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(next->vm_start))
			== bde_end)
		next_shared = true;
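	/*
	 * For example (illustrative): if 'prev' ends inside the address
	 * range covered by the same bounds table that covers 'start',
	 * both map through bde_start, so that table may only be
	 * partially freed.
	 */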
	/*
	 * This virtual address region being munmap()ed is only
	 * covered by one bounds table.
	 *
	 * In this case, if this table is also shared with adjacent
	 * VMAs, only part of the backing physical memory of the bounds
	 * table needs to be freed. Otherwise the whole bounds table
	 * needs to be unmapped.
	 */
	if (bde_start == bde_end) {
		return unmap_shared_bt(mm, bde_start, start, end,
				prev_shared, next_shared);
	}
	/*
	 * If more than one bounds table is covered by this virtual
	 * address region being munmap()ed, we need to separately check
	 * whether bde_start and bde_end are shared with adjacent VMAs.
	 */
	ret = unmap_shared_bt(mm, bde_start, start, end, prev_shared, false);
	if (ret)
		return ret;
	ret = unmap_shared_bt(mm, bde_end, start, end, false, next_shared);
	if (ret)
		return ret;

	return 0;
}
static int mpx_unmap_tables(struct mm_struct *mm,
			    unsigned long start, unsigned long end)
{
	int ret;
	long __user *bd_entry, *bde_start, *bde_end;
	unsigned long bt_addr;
	/*
	 * "Edge" bounds tables are those which are being used by the region
	 * (start -> end), but that may be shared with adjacent areas. If they
	 * turn out to be completely unshared, they will be freed. If they are
	 * shared, we will free the backing store (like an MADV_DONTNEED) for
	 * areas used by this region.
	 */
	ret = unmap_edge_bts(mm, start, end);
	switch (ret) {
		/* non-present tables are OK */
		case 0:
		case -ENOENT:
			/* Success, or no tables to unmap */
			break;
		case -EINVAL:
		case -EFAULT:
		default:
			return ret;
	}
	/*
	 * Only unmap the bounds tables that are
	 *   1. fully covered
	 *   2. not at the edges of the mapping, even if fully aligned
	 */
	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
	for (bd_entry = bde_start + 1; bd_entry < bde_end; bd_entry++) {
		ret = get_bt_addr(mm, bd_entry, &bt_addr);
		switch (ret) {
			case 0:
				break;
			case -ENOENT:
				/* No table here, try the next one */
				continue;
			case -EINVAL:
			case -EFAULT:
			default:
				/*
				 * Note: we are being strict here.
				 * Any time we run in to an issue
				 * unmapping tables, we stop and
				 * SIGSEGV.
				 */
				return ret;
		}

		ret = unmap_single_bt(mm, bd_entry, bt_addr);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Free unused bounds tables covered in a virtual address region being
 * munmap()ed. Assume end > start.
 *
 * This function will be called by do_munmap(), and the VMAs covering
 * the virtual address region start...end have already been split if
 * necessary, and the 'vma' is the first vma in this range (start -> end).
 */
void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int ret;
	/*
	 * Refuse to do anything unless userspace has asked
	 * the kernel to help manage the bounds tables.
	 */
	if (!kernel_managing_mpx_tables(current->mm))
		return;
	/*
	 * This will look across the entire 'start -> end' range,
	 * and find all of the non-VM_MPX VMAs.
	 *
	 * To avoid recursion, if a VM_MPX vma is found in the range
	 * (start->end), we will not continue follow-up work. This
	 * recursion represents having bounds tables for bounds tables,
	 * which should not occur normally. Being strict about it here
	 * helps ensure that we do not have an exploitable stack overflow.
	 */
	do {
		if (vma->vm_flags & VM_MPX)
			return;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
	ret = mpx_unmap_tables(mm, start, end);
	if (ret)
		force_sig(SIGSEGV, current);
}