1
2
3
4
5
6
7
8#include <linux/hardirq.h>
9#include <linux/kallsyms.h>
10#include <linux/module.h>
11#include <linux/mutex.h>
12#include <linux/printk.h>
13#include <linux/ratelimit.h>
14#include <linux/rcupdate.h>
15#include <linux/vmalloc.h>
16#include <asm/cacheflush.h>
17#include <asm/set_memory.h>
18
19
/*
 * Select which UBSAN entry point the cfi_failure_handler definition below
 * provides: with CONFIG_CFI_PERMISSIVE the compiler's calls land on the
 * warning-only handler, otherwise on the aborting variant.
 */
#ifdef CONFIG_CFI_PERMISSIVE
#define cfi_failure_handler __ubsan_handle_cfi_check_fail
#else
#define cfi_failure_handler __ubsan_handle_cfi_check_fail_abort
#endif
25
26static inline void handle_cfi_failure(void *ptr)
27{
28 if (IS_ENABLED(CONFIG_CFI_PERMISSIVE))
29 WARN_RATELIMIT(1, "CFI failure (target: %pS):\n", ptr);
30 else
31 panic("CFI failure (target: %pS)\n", ptr);
32}
33
34#ifdef CONFIG_MODULES
35#ifdef CONFIG_CFI_CLANG_SHADOW
36
37
38
39
/* A shadow slot: page offset (relative to base) of a __cfi_check function */
typedef u16 shadow_t;
/* All bits set marks a slot with no mapping */
#define SHADOW_INVALID ((shadow_t)~0UL)

struct cfi_shadow {
	/* Page index for the beginning of the range the shadow covers */
	unsigned long base;
	/* Array of __cfi_check locations, one slot per covered page;
	 * declared [1] but actually fills the rest of SHADOW_SIZE
	 * (see SHADOW_ARR_SLOTS) */
	shadow_t shadow[1];
} __packed;
49
50
51
52
53
/* Number of pages in a 128M address range — the span one shadow covers */
#define __SHADOW_RANGE (_UL(SZ_128M) >> PAGE_SHIFT)

/* Size of the shadow itself, rounded up to at least one whole page */
#define __SHADOW_PAGES ((__SHADOW_RANGE * sizeof(shadow_t)) >> PAGE_SHIFT)
#define SHADOW_PAGES max(1UL, __SHADOW_PAGES)
#define SHADOW_SIZE (SHADOW_PAGES << PAGE_SHIFT)

/* Number of shadow_t slots that fit after the struct cfi_shadow header */
#define SHADOW_ARR_SIZE (SHADOW_SIZE - offsetof(struct cfi_shadow, shadow))
#define SHADOW_ARR_SLOTS (SHADOW_ARR_SIZE / sizeof(shadow_t))

/* Serializes all shadow updates; lookups use RCU instead */
static DEFINE_MUTEX(shadow_update_lock);
/* The current shadow, published via RCU; NULL when no shadow is in use */
static struct cfi_shadow __rcu *cfi_shadow __read_mostly;
67
68
69static inline int ptr_to_shadow(const struct cfi_shadow *s, unsigned long ptr)
70{
71 unsigned long index;
72 unsigned long page = ptr >> PAGE_SHIFT;
73
74 if (unlikely(page < s->base))
75 return -1;
76
77 index = page - s->base;
78
79 if (index >= SHADOW_ARR_SLOTS)
80 return -1;
81
82 return (int)index;
83}
84
85
86static inline unsigned long shadow_to_ptr(const struct cfi_shadow *s,
87 int index)
88{
89 if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
90 return 0;
91
92 return (s->base + index) << PAGE_SHIFT;
93}
94
95
96static inline unsigned long shadow_to_check_fn(const struct cfi_shadow *s,
97 int index)
98{
99 if (unlikely(index < 0 || index >= SHADOW_ARR_SLOTS))
100 return 0;
101
102 if (unlikely(s->shadow[index] == SHADOW_INVALID))
103 return 0;
104
105
106 return (s->base + s->shadow[index]) << PAGE_SHIFT;
107}
108
/*
 * Initialize @next from the previous shadow @prev, rebasing every valid
 * entry onto @next's base page. Entries that fall outside the new range
 * are dropped. Called with shadow_update_lock held.
 */
static void prepare_next_shadow(const struct cfi_shadow __rcu *prev,
				struct cfi_shadow *next)
{
	int i, index, check;

	/* Mark everything invalid: SHADOW_INVALID is all bits set */
	memset(next->shadow, 0xFF, SHADOW_ARR_SIZE);

	if (!prev)
		return;

	/* Same base: the old entries are valid as-is, plain copy suffices */
	if (prev->base == next->base) {
		memcpy(next->shadow, prev->shadow, SHADOW_ARR_SIZE);
		return;
	}

	/* Rebase each valid entry from the old range into the new one */
	for (i = 0; i < SHADOW_ARR_SLOTS; ++i) {
		if (prev->shadow[i] == SHADOW_INVALID)
			continue;

		/* Where does the page covered by slot i land in @next? */
		index = ptr_to_shadow(next, shadow_to_ptr(prev, i));
		if (index < 0)
			continue;

		/* Where does the slot's check function land in @next? */
		check = ptr_to_shadow(next,
				shadow_to_check_fn(prev, prev->shadow[i]));
		if (check < 0)
			continue;

		next->shadow[index] = (shadow_t)check;
	}
}
143
/*
 * Record @mod's __cfi_check location in the shadow for every page in
 * [min_addr, max_addr]. Called with shadow_update_lock held.
 */
static void add_module_to_shadow(struct cfi_shadow *s, struct module *mod,
		unsigned long min_addr, unsigned long max_addr)
{
	int check_index;
	unsigned long check = (unsigned long)mod->cfi_check;
	unsigned long ptr;

	/* The slot encoding stores page indices, so __cfi_check must be
	 * page aligned; otherwise skip the shadow for this module */
	if (unlikely(!PAGE_ALIGNED(check))) {
		pr_warn("cfi: not using shadow for module %s\n", mod->name);
		return;
	}

	/* __cfi_check outside the covered range: leave the module to the
	 * slower lookup path */
	check_index = ptr_to_shadow(s, check);
	if (check_index < 0)
		return;

	for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
		int index = ptr_to_shadow(s, ptr);

		if (index >= 0) {
			/* Each page should be mapped at most once */
			WARN_ON_ONCE(s->shadow[index] != SHADOW_INVALID);
			s->shadow[index] = (shadow_t)check_index;
		}
	}
}
171
172static void remove_module_from_shadow(struct cfi_shadow *s, struct module *mod,
173 unsigned long min_addr, unsigned long max_addr)
174{
175 unsigned long ptr;
176
177 for (ptr = min_addr; ptr <= max_addr; ptr += PAGE_SIZE) {
178 int index = ptr_to_shadow(s, ptr);
179
180 if (index >= 0)
181 s->shadow[index] = SHADOW_INVALID;
182 }
183}
184
/* Operation applied to the shadow for a module's text page range */
typedef void (*update_shadow_fn)(struct cfi_shadow *, struct module *,
		unsigned long min_addr, unsigned long max_addr);

/*
 * Build a new shadow based at @base_addr, carry over the previous
 * entries, apply @fn for @mod's core text pages, and publish the result
 * via RCU. The previous shadow is freed after a grace period. If the
 * allocation fails, the shadow pointer is set to NULL and lookups fall
 * back to find_module_check_fn().
 */
static void update_shadow(struct module *mod, unsigned long base_addr,
			  update_shadow_fn fn)
{
	struct cfi_shadow *prev;
	struct cfi_shadow *next;
	unsigned long min_addr, max_addr;

	/* Allocate before taking the lock; failure handled below */
	next = vmalloc(SHADOW_SIZE);

	mutex_lock(&shadow_update_lock);
	prev = rcu_dereference_protected(cfi_shadow,
					 mutex_is_locked(&shadow_update_lock));

	if (next) {
		next->base = base_addr >> PAGE_SHIFT;
		prepare_next_shadow(prev, next);

		/* Only the module's core text range is (un)mapped */
		min_addr = (unsigned long)mod->core_layout.base;
		max_addr = min_addr + mod->core_layout.text_size;
		fn(next, mod, min_addr & PAGE_MASK, max_addr & PAGE_MASK);

		/* Protect the published shadow against stray writes */
		set_memory_ro((unsigned long)next, SHADOW_PAGES);
	}

	rcu_assign_pointer(cfi_shadow, next);
	mutex_unlock(&shadow_update_lock);
	/* Wait out readers of the old shadow before tearing it down */
	synchronize_rcu();

	if (prev) {
		set_memory_rw((unsigned long)prev, SHADOW_PAGES);
		vfree(prev);
	}
}
221
/* Map a newly loaded module's text pages to its __cfi_check function */
void cfi_module_add(struct module *mod, unsigned long base_addr)
{
	update_shadow(mod, base_addr, add_module_to_shadow);
}
226
/* Drop a module's text pages from the shadow when it is unloaded */
void cfi_module_remove(struct module *mod, unsigned long base_addr)
{
	update_shadow(mod, base_addr, remove_module_from_shadow);
}
231
232static inline cfi_check_fn ptr_to_check_fn(const struct cfi_shadow __rcu *s,
233 unsigned long ptr)
234{
235 int index;
236
237 if (unlikely(!s))
238 return NULL;
239
240 index = ptr_to_shadow(s, ptr);
241 if (index < 0)
242 return NULL;
243
244 return (cfi_check_fn)shadow_to_check_fn(s, index);
245}
246
/*
 * Fast path: resolve @ptr via the RCU-published shadow. Returns NULL
 * when no shadow exists or the address has no mapping.
 */
static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr)
{
	cfi_check_fn fn;

	/* _notrace variants: this runs on the CFI check path itself */
	rcu_read_lock_sched_notrace();
	fn = ptr_to_check_fn(rcu_dereference_sched(cfi_shadow), ptr);
	rcu_read_unlock_sched_notrace();

	return fn;
}
257
258#else
259
/* Without CONFIG_CFI_CLANG_SHADOW there is no fast lookup path */
static inline cfi_check_fn find_shadow_check_fn(unsigned long ptr)
{
	return NULL;
}
264
265#endif
266
267static inline cfi_check_fn find_module_check_fn(unsigned long ptr)
268{
269 cfi_check_fn fn = NULL;
270 struct module *mod;
271
272 rcu_read_lock_sched_notrace();
273 mod = __module_address(ptr);
274 if (mod)
275 fn = mod->cfi_check;
276 rcu_read_unlock_sched_notrace();
277
278 return fn;
279}
280
/*
 * Resolve the __cfi_check function responsible for @ptr. Kernel text is
 * always checked by the built-in __cfi_check; other addresses go through
 * the shadow fast path (when enabled) and then the module list.
 */
static inline cfi_check_fn find_check_fn(unsigned long ptr)
{
	cfi_check_fn fn = NULL;
	unsigned long flags;
	bool rcu_idle;

	if (is_kernel_text(ptr))
		return __cfi_check;

	/*
	 * Both lookups below use RCU read-side sections, but indirect
	 * calls can also happen while RCU is not watching this CPU. In
	 * that case, enter an IRQ-like context (with interrupts off) so
	 * RCU watches us for the duration of the lookups.
	 */
	rcu_idle = !rcu_is_watching();
	if (rcu_idle) {
		local_irq_save(flags);
		ct_irq_enter();
	}

	if (IS_ENABLED(CONFIG_CFI_CLANG_SHADOW))
		fn = find_shadow_check_fn(ptr);
	if (!fn)
		fn = find_module_check_fn(ptr);

	if (rcu_idle) {
		ct_irq_exit();
		local_irq_restore(flags);
	}

	return fn;
}
313
314void __cfi_slowpath_diag(uint64_t id, void *ptr, void *diag)
315{
316 cfi_check_fn fn = find_check_fn((unsigned long)ptr);
317
318 if (likely(fn))
319 fn(id, ptr, diag);
320 else
321 handle_cfi_failure(ptr);
322}
323EXPORT_SYMBOL(__cfi_slowpath_diag);
324
325#else
326
/* Without module support, a slow-path check is always a CFI failure */
void __cfi_slowpath_diag(uint64_t id, void *ptr, void *diag)
{
	handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(__cfi_slowpath_diag);
332
333#endif
334
/*
 * UBSAN entry point the compiler calls on a failed CFI check; the
 * cfi_failure_handler macro selects whether this defines the warning or
 * the aborting variant, depending on CONFIG_CFI_PERMISSIVE.
 */
void cfi_failure_handler(void *data, void *ptr, void *vtable)
{
	handle_cfi_failure(ptr);
}
EXPORT_SYMBOL(cfi_failure_handler);
340