// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"
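
/*
 * Capture the current stack trace, truncate it at the IRQ entry point if it
 * was taken in interrupt context, and store it in the stack depot, returning
 * a handle the reporting code can later resolve back into a trace.
 */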
depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}
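
/*
 * kasan_depth is a per-task counter: while it is non-zero, KASAN reports for
 * the current task are suppressed. kasan_disable_current() and
 * kasan_enable_current() must therefore be used in matching pairs; kmemleak,
 * for example, wraps its scans of potentially poisoned memory with them.
 */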
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_enable_current(void)
{
	current->kasan_depth++;
}
EXPORT_SYMBOL(kasan_enable_current);

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
EXPORT_SYMBOL(kasan_disable_current);
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
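
/* Unpoison the whole [address, address + size) memory range. */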
void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
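
/* Unpoison the entire stack of a task. */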
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current',
	 * because this function is called by interrupt handlers, which can
	 * be running on the stack of a different task.
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
	if (kasan_stack_collection_enabled())
		return SLAB_KASAN;
	return 0;
}
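
/*
 * Assign the same random tag to every page of a non-highmem page_alloc
 * allocation and unpoison the whole range. KASAN does not track highmem,
 * so such pages are skipped.
 */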
void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = kasan_random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_FREE_PAGE, init);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	/*
	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
	 * KASAN. Currently this flag is used in two places:
	 * 1. In slab_ksize() when calculating the size of the accessible
	 *    memory within the object.
	 * 2. In slab_common.c to prevent merging of sanitized caches.
	 */
	*flags |= SLAB_KASAN;

	if (!kasan_stack_collection_enabled())
		return;

	ok_size = *size;

	/* Add alloc meta into the redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/* Only the generic mode uses free meta or flexible redzones. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		return;
	}

	/*
	 * Add free meta into the redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate the size with the optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use the optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}
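
/*
 * Mark a cache as backing kmalloc() allocations: for such caches the alloc
 * track is only saved by kasan_kmalloc(), once the exact requested size is
 * known, rather than by kasan_slab_alloc().
 */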
void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}
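
/* Size of the KASAN metadata stored in the object's redzone. */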
size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
	       (cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif
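
/*
 * Reset the tags of all pages that back a slab and poison its payload as a
 * redzone; objects get unpoisoned individually as they are allocated.
 */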
void __kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_KMALLOC_REDZONE, false);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on objects' indexes, so that objects that are next
 *    to each other get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
			    const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (alloc_meta)
			__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS. */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}
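
/*
 * Common free hook: report invalid and inaccessible (e.g. double) frees,
 * poison the freed object, and decide whether to put it into the quarantine.
 * Returns true if the slab allocator must not free the object itself, either
 * because an error was reported or because the quarantine took ownership.
 */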
static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				       unsigned long ip, bool quarantine,
				       bool init)
{
	u8 tag;
	void *tagged_object;

	if (!kasan_arch_is_ready())
		return false;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_KMALLOC_FREE, init);

	if (IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)
		return false;

	if (kasan_stack_collection_enabled())
		kasan_set_free_info(cache, object, tag);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
		       unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}

static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	/*
	 * The object will be poisoned by kasan_poison_pages() or
	 * kasan_slab_free_mempool().
	 */
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc() and
	 * kmalloc() backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc() is larger than
	 * KMALLOC_MAX_CACHE_SIZE and kmalloc() falls back onto page_alloc.
	 */
	if (unlikely(!PageSlab(page))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
	} else {
		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
	}
}

static void set_alloc_info(struct kmem_cache *cache, void *object,
			   gfp_t flags, bool is_kmalloc)
{
	struct kasan_alloc_meta *alloc_meta;

	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
		return;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}
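
/*
 * Allocation hook for slab objects: unpoison the object, assign a tag for
 * the tag-based modes, and save the allocation stack trace for non-kmalloc
 * caches.
 */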
void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
				       void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign a random tag for the tag-based modes.
	 * The tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, false);

	return tagged_object;
}
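
/*
 * Common kmalloc()/krealloc() hook: poison the redzone between the requested
 * size and the end of the object, and record the allocation stack trace.
 */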
static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				      const void *object, size_t size,
				      gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				 KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
			       KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, true);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
				    size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
					  gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_unpoison_pages()
	 * for alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last granule to cover the unaligned part of
	 * the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				 KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but that is not an
	 * issue, as the redzone is repoisoned below in any case.
	 */
	kasan_unpoison(object, size, false);

	page = virt_to_head_page(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!PageSlab(page)))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
}
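
/*
 * Check that a single byte at @address is accessible; report and return
 * false if it is not.
 */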
bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}