1
2
3
4
5
6
7
8
9#include <linux/sched/task_stack.h>
10#include <linux/sched/debug.h>
11#include <linux/sched.h>
12#include <linux/kernel.h>
13#include <linux/export.h>
14#include <linux/kallsyms.h>
15#include <linux/stacktrace.h>
16#include <linux/interrupt.h>
17
18
19
20
21
22
23
/**
 * stack_trace_print - Print the entries in the stack trace
 * @entries:	Pointer to storage array
 * @nr_entries:	Number of entries in the storage array
 * @spaces:	Number of leading spaces to print
 */
void stack_trace_print(const unsigned long *entries, unsigned int nr_entries,
		       int spaces)
{
	const unsigned long *pos, *end;

	if (WARN_ON(!entries))
		return;

	end = entries + nr_entries;
	for (pos = entries; pos < end; pos++)
		printk("%*c%pS\n", 1 + spaces, ' ', (void *)*pos);
}
EXPORT_SYMBOL_GPL(stack_trace_print);
36
37
38
39
40
41
42
43
44
45
46
/**
 * stack_trace_snprint - Print the entries in the stack trace into a buffer
 * @buf:	Pointer to the print buffer
 * @size:	Size of the print buffer
 * @entries:	Pointer to storage array
 * @nr_entries:	Number of entries in the storage array
 * @spaces:	Number of leading spaces to print
 *
 * Return: Number of bytes printed (snprintf() style: counts what would have
 *	   been emitted even when the buffer is truncated).
 */
int stack_trace_snprint(char *buf, size_t size, const unsigned long *entries,
			unsigned int nr_entries, int spaces)
{
	unsigned int idx, total = 0;

	if (WARN_ON(!entries))
		return 0;

	for (idx = 0; idx < nr_entries; idx++) {
		unsigned int len;

		/* Buffer exhausted; nothing more can be printed. */
		if (!size)
			break;

		len = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
			       (void *)entries[idx]);
		total += len;

		if (len >= size) {
			/* Truncated: advance to the end and stop writing. */
			buf += size;
			size = 0;
		} else {
			buf += len;
			size -= len;
		}
	}

	return total;
}
EXPORT_SYMBOL_GPL(stack_trace_snprint);
72
73#ifdef CONFIG_ARCH_STACKWALK
74
/*
 * Bookkeeping passed as the opaque cookie through arch_stack_walk() to the
 * stack_trace_consume_entry*() callbacks below.
 */
struct stacktrace_cookie {
	unsigned long *store;	/* array receiving the trace entries */
	unsigned int size;	/* capacity of @store, in entries */
	unsigned int skip;	/* number of leading entries to discard */
	unsigned int len;	/* number of entries stored so far */
};
81
82static bool stack_trace_consume_entry(void *cookie, unsigned long addr)
83{
84 struct stacktrace_cookie *c = cookie;
85
86 if (c->len >= c->size)
87 return false;
88
89 if (c->skip > 0) {
90 c->skip--;
91 return true;
92 }
93 c->store[c->len++] = addr;
94 return c->len < c->size;
95}
96
/*
 * Same as stack_trace_consume_entry(), but silently drops addresses inside
 * scheduler functions so they don't clutter a task's backtrace.
 */
static bool stack_trace_consume_entry_nosched(void *cookie, unsigned long addr)
{
	if (!in_sched_functions(addr))
		return stack_trace_consume_entry(cookie, addr);
	return true;
}
103
104
105
106
107
108
109
110
111
112unsigned int stack_trace_save(unsigned long *store, unsigned int size,
113 unsigned int skipnr)
114{
115 stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
116 struct stacktrace_cookie c = {
117 .store = store,
118 .size = size,
119 .skip = skipnr + 1,
120 };
121
122 arch_stack_walk(consume_entry, &c, current, NULL);
123 return c.len;
124}
125EXPORT_SYMBOL_GPL(stack_trace_save);
126
127
128
129
130
131
132
133
134
135
136unsigned int stack_trace_save_tsk(struct task_struct *tsk, unsigned long *store,
137 unsigned int size, unsigned int skipnr)
138{
139 stack_trace_consume_fn consume_entry = stack_trace_consume_entry_nosched;
140 struct stacktrace_cookie c = {
141 .store = store,
142 .size = size,
143
144 .skip = skipnr + (current == tsk),
145 };
146
147 if (!try_get_task_stack(tsk))
148 return 0;
149
150 arch_stack_walk(consume_entry, &c, tsk, NULL);
151 put_task_stack(tsk);
152 return c.len;
153}
154
155
156
157
158
159
160
161
162
163
164unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
165 unsigned int size, unsigned int skipnr)
166{
167 stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
168 struct stacktrace_cookie c = {
169 .store = store,
170 .size = size,
171 .skip = skipnr,
172 };
173
174 arch_stack_walk(consume_entry, &c, current, regs);
175 return c.len;
176}
177
178#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
179
180
181
182
183
184
185
186
187
188
189
190
191int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
192 unsigned int size)
193{
194 stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
195 struct stacktrace_cookie c = {
196 .store = store,
197 .size = size,
198 };
199 int ret;
200
201
202
203
204
205 if (!try_get_task_stack(tsk))
206 return 0;
207
208 ret = arch_stack_walk_reliable(consume_entry, &c, tsk);
209 put_task_stack(tsk);
210 return ret ? ret : c.len;
211}
212#endif
213
214#ifdef CONFIG_USER_STACKTRACE_SUPPORT
215
216
217
218
219
220
221
222unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
223{
224 stack_trace_consume_fn consume_entry = stack_trace_consume_entry;
225 struct stacktrace_cookie c = {
226 .store = store,
227 .size = size,
228 };
229
230
231 if (current->flags & PF_KTHREAD)
232 return 0;
233
234 arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));
235
236 return c.len;
237}
238#endif
239
240#else
241
242
243
244
245
246
/*
 * Weak fallback for architectures that have not implemented task stack
 * tracing via the legacy struct stack_trace interface; warns once and
 * stores nothing.
 */
__weak void
save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
}
252
/*
 * Weak fallback for architectures that have not implemented pt_regs based
 * stack tracing via the legacy struct stack_trace interface; warns once and
 * stores nothing.
 */
__weak void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
}
258
259
260
261
262
263
264
265
266
267unsigned int stack_trace_save(unsigned long *store, unsigned int size,
268 unsigned int skipnr)
269{
270 struct stack_trace trace = {
271 .entries = store,
272 .max_entries = size,
273 .skip = skipnr + 1,
274 };
275
276 save_stack_trace(&trace);
277 return trace.nr_entries;
278}
279EXPORT_SYMBOL_GPL(stack_trace_save);
280
281
282
283
284
285
286
287
288
289
290unsigned int stack_trace_save_tsk(struct task_struct *task,
291 unsigned long *store, unsigned int size,
292 unsigned int skipnr)
293{
294 struct stack_trace trace = {
295 .entries = store,
296 .max_entries = size,
297
298 .skip = skipnr + (current == task),
299 };
300
301 save_stack_trace_tsk(task, &trace);
302 return trace.nr_entries;
303}
304
305
306
307
308
309
310
311
312
313
314unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
315 unsigned int size, unsigned int skipnr)
316{
317 struct stack_trace trace = {
318 .entries = store,
319 .max_entries = size,
320 .skip = skipnr,
321 };
322
323 save_stack_trace_regs(regs, &trace);
324 return trace.nr_entries;
325}
326
327#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
328
329
330
331
332
333
334
335
336
337
338
339
340int stack_trace_save_tsk_reliable(struct task_struct *tsk, unsigned long *store,
341 unsigned int size)
342{
343 struct stack_trace trace = {
344 .entries = store,
345 .max_entries = size,
346 };
347 int ret = save_stack_trace_tsk_reliable(tsk, &trace);
348
349 return ret ? ret : trace.nr_entries;
350}
351#endif
352
353#ifdef CONFIG_USER_STACKTRACE_SUPPORT
354
355
356
357
358
359
360
361unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
362{
363 struct stack_trace trace = {
364 .entries = store,
365 .max_entries = size,
366 };
367
368 save_stack_trace_user(&trace);
369 return trace.nr_entries;
370}
371#endif
372
373#endif
374
375static inline bool in_irqentry_text(unsigned long ptr)
376{
377 return (ptr >= (unsigned long)&__irqentry_text_start &&
378 ptr < (unsigned long)&__irqentry_text_end) ||
379 (ptr >= (unsigned long)&__softirqentry_text_start &&
380 ptr < (unsigned long)&__softirqentry_text_end);
381}
382
383
384
385
386
387
388
389
/**
 * filter_irq_stacks - Find the length of the trace up to the irq entry point
 * @entries:	Pointer to the storage array
 * @nr_entries:	Number of entries in the storage array
 *
 * Return: Number of entries up to and including the first one that lies in
 *	   irq entry text; @nr_entries when no such entry exists.
 */
unsigned int filter_irq_stacks(unsigned long *entries, unsigned int nr_entries)
{
	unsigned int idx = 0;

	while (idx < nr_entries) {
		/* Include the irqentry function itself in the trace. */
		if (in_irqentry_text(entries[idx]))
			return idx + 1;
		idx++;
	}

	return nr_entries;
}
EXPORT_SYMBOL_GPL(filter_irq_stacks);
403