/*
 * arch/arm64/kernel/probes/decode-insn.c
 *
 * Copyright (C) 2013 Linaro Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <asm/kprobes.h>
#include <asm/insn.h>
#include <asm/sections.h>

#include "decode-insn.h"
#include "simulate-insn.h"

static bool __kprobes aarch64_insn_is_steppable(u32 insn)
{
	/*
	 * Branch instructions will write a new value into the PC which is
	 * likely to be relative to the XOL address and therefore invalid.
	 * Deliberate generation of an exception during stepping is also not
	 * currently safe. Lastly, MSR instructions can do any number of nasty
	 * things we can't handle during single-stepping.
	 */
	if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
		if (aarch64_insn_is_branch(insn) ||
		    aarch64_insn_is_msr_imm(insn) ||
		    aarch64_insn_is_msr_reg(insn) ||
		    aarch64_insn_is_exception(insn) ||
		    aarch64_insn_is_eret(insn))
			return false;

		/*
		 * The MRS instruction may not return a correct value when
		 * executing in the single-stepping environment. We do make one
		 * exception, for reading the DAIF bits.
		 */
		if (aarch64_insn_is_mrs(insn))
			return aarch64_insn_extract_system_reg(insn)
			     != AARCH64_INSN_SPCLREG_DAIF;

		/*
	 * The HINT instruction is problematic when single-stepping,
		 * except for the NOP case.
		 */
		if (aarch64_insn_is_hint(insn))
			return aarch64_insn_is_nop(insn);

		return true;
	}

	/*
	 * Instructions which load PC-relative literals are not going to work
	 * when executed from an XOL slot. Instructions doing an exclusive
	 * load/store are not going to complete successfully when single-step
	 * exception handling happens in the middle of the sequence.
	 */
	if (aarch64_insn_uses_literal(insn) ||
	    aarch64_insn_is_exclusive(insn))
		return false;

	return true;
}
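
/*
 * Illustrative example (not part of this code): a typical LL/SC sequence
 * that must not be single-stepped, since the exclusive monitor does not
 * survive the single-step exception and the STXR would keep failing:
 *
 *	retry:	ldxr	w0, [x1]	// exclusive load opens the region
 *		add	w0, w0, #1
 *		stxr	w2, w0, [x1]	// exclusive store closes it
 *		cbnz	w2, retry	// retry until the store succeeds
 */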

/* Return:
 *   INSN_REJECTED     If the instruction is one we cannot kprobe,
 *   INSN_GOOD         If the instruction is supported and uses an instruction slot,
 *   INSN_GOOD_NO_SLOT If the instruction is supported but doesn't use its slot.
 */
static enum kprobe_insn __kprobes
arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	/*
	 * Instructions reading or modifying the PC won't work from the XOL
	 * slot.
	 */
	if (aarch64_insn_is_steppable(insn))
		return INSN_GOOD;

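	/*
	 * The instruction cannot be stepped out-of-line; see whether it
	 * is one of the branch/literal forms we know how to simulate.
	 */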
	if (aarch64_insn_is_bcond(insn)) {
		asi->handler = simulate_b_cond;
	} else if (aarch64_insn_is_cbz(insn) ||
	    aarch64_insn_is_cbnz(insn)) {
		asi->handler = simulate_cbz_cbnz;
	} else if (aarch64_insn_is_tbz(insn) ||
	    aarch64_insn_is_tbnz(insn)) {
		asi->handler = simulate_tbz_tbnz;
	} else if (aarch64_insn_is_adr_adrp(insn)) {
		asi->handler = simulate_adr_adrp;
	} else if (aarch64_insn_is_b(insn) ||
	    aarch64_insn_is_bl(insn)) {
		asi->handler = simulate_b_bl;
	} else if (aarch64_insn_is_br(insn) ||
	    aarch64_insn_is_blr(insn) ||
	    aarch64_insn_is_ret(insn)) {
		asi->handler = simulate_br_blr_ret;
	} else if (aarch64_insn_is_ldr_lit(insn)) {
		asi->handler = simulate_ldr_literal;
	} else if (aarch64_insn_is_ldrsw_lit(insn)) {
		asi->handler = simulate_ldrsw_literal;
	} else {
		/*
		 * Instruction cannot be stepped out-of-line and we don't
		 * (yet) simulate it.
		 */
		return INSN_REJECTED;
	}

	return INSN_GOOD_NO_SLOT;
}

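/*
 * Scan backwards from the probed address looking for the start of an
 * exclusive load/store (LL/SC) sequence. Return true if the probe sits
 * between an exclusive load and its matching exclusive store, i.e.
 * inside an atomic region that a single-step exception would break.
 */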
static bool __kprobes
is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
{
	while (scan_start > scan_end) {
		/*
		 * An atomic region starts with an exclusive load and ends
		 * with an exclusive store.
		 */
		if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
			return false;
		else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
			return true;
		scan_start--;
	}

	return false;
}

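/*
 * Decode the instruction at @addr and fill in @asi. Reject the probe if
 * the instruction can be neither stepped out-of-line nor simulated, or
 * if it lies within an exclusive load/store sequence.
 */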
enum kprobe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
{
	enum kprobe_insn decoded;
	kprobe_opcode_t insn = le32_to_cpu(*addr);
	kprobe_opcode_t *scan_start = addr - 1;
	kprobe_opcode_t *scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	struct module *mod;
#endif

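	/*
	 * Clamp the backwards scan so it cannot run off the start of the
	 * kernel text, or of the module section containing the probe.
	 */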
	if (addr >= (kprobe_opcode_t *)_text &&
	    scan_end < (kprobe_opcode_t *)_text)
		scan_end = (kprobe_opcode_t *)_text;
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	else {
		preempt_disable();
		mod = __module_address((unsigned long)addr);
		if (mod && within_module_init((unsigned long)addr, mod) &&
			!within_module_init((unsigned long)scan_end, mod))
			scan_end = (kprobe_opcode_t *)mod->module_init;
		else if (mod && within_module_core((unsigned long)addr, mod) &&
			!within_module_core((unsigned long)scan_end, mod))
			scan_end = (kprobe_opcode_t *)mod->module_core;
		preempt_enable();
	}
#endif
	decoded = arm_probe_decode_insn(insn, asi);

	if (decoded == INSN_REJECTED ||
			is_probed_address_atomic(scan_start, scan_end))
		return INSN_REJECTED;

	return decoded;
}