#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"
#include "coredump.h"

#include <trace/events/sched.h>

int core_uses_pid;
char core_pattern[CORENAME_MAX_SIZE] = "core";
unsigned int core_pipe_limit;

struct core_name {
	char *corename;
	int used, size;
};
static atomic_t call_count = ATOMIC_INIT(1);

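/*
 * Grow the corename buffer: each expansion bumps call_count, so the
 * allocation is always a whole multiple of CORENAME_MAX_SIZE.
 */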
static int expand_corename(struct core_name *cn)
{
	char *old_corename = cn->corename;

	cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
	cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);

	if (!cn->corename) {
		kfree(old_corename);
		return -ENOMEM;
	}

	return 0;
}

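/*
 * Append printf-formatted text to cn->corename, expanding the buffer
 * first if the formatted output would not fit.
 */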
static int cn_printf(struct core_name *cn, const char *fmt, ...)
{
	char *cur;
	int need;
	int ret;
	va_list arg;

	va_start(arg, fmt);
	need = vsnprintf(NULL, 0, fmt, arg);
	va_end(arg);

	if (likely(need < cn->size - cn->used - 1))
		goto out_printf;

	ret = expand_corename(cn);
	if (ret)
		goto expand_fail;

out_printf:
	cur = cn->corename + cn->used;
	va_start(arg, fmt);
	vsnprintf(cur, need + 1, fmt, arg);
	va_end(arg);
	cn->used += need;
	return 0;

expand_fail:
	return ret;
}

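/* Replace '/' with '!' so an expanded value cannot add path components. */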
static void cn_escape(char *str)
{
	for (; *str; str++)
		if (*str == '/')
			*str = '!';
}

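/*
 * Append the path of the dumping task's executable (or its comm if the
 * path is unknown), escaping any '/' characters.
 */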
static int cn_print_exe_file(struct core_name *cn)
{
	struct file *exe_file;
	char *pathbuf, *path;
	int ret;

	exe_file = get_mm_exe_file(current->mm);
	if (!exe_file) {
		char *commstart = cn->corename + cn->used;
		ret = cn_printf(cn, "%s (path unknown)", current->comm);
		cn_escape(commstart);
		return ret;
	}

	pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
	if (!pathbuf) {
		ret = -ENOMEM;
		goto put_exe_file;
	}

	path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
	if (IS_ERR(path)) {
		ret = PTR_ERR(path);
		goto free_buf;
	}

	cn_escape(path);

	ret = cn_printf(cn, "%s", path);

free_buf:
	kfree(pathbuf);
put_exe_file:
	fput(exe_file);
	return ret;
}

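/*
 * Expand core_pattern into cn->corename. Returns 1 if the pattern names
 * a pipe helper ('|' prefix), 0 for a plain file, or a negative errno on
 * failure.
 */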
static int format_corename(struct core_name *cn, struct coredump_params *cprm)
{
	const struct cred *cred = current_cred();
	const char *pat_ptr = core_pattern;
	int ispipe = (*pat_ptr == '|');
	int pid_in_pattern = 0;
	int err = 0;

	cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
	cn->corename = kmalloc(cn->size, GFP_KERNEL);
	cn->used = 0;

	if (!cn->corename)
		return -ENOMEM;

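	/* Walk the pattern, copying literals and expanding '%' specifiers. */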
	while (*pat_ptr) {
		if (*pat_ptr != '%') {
			if (*pat_ptr == 0)
				goto out;
			err = cn_printf(cn, "%c", *pat_ptr++);
		} else {
			switch (*++pat_ptr) {

			case 0:
				goto out;

			case '%':
				err = cn_printf(cn, "%c", '%');
				break;

			case 'p':
				pid_in_pattern = 1;
				err = cn_printf(cn, "%d",
					      task_tgid_vnr(current));
				break;

			case 'u':
				err = cn_printf(cn, "%d", cred->uid);
				break;

			case 'g':
				err = cn_printf(cn, "%d", cred->gid);
				break;
			case 'd':
				err = cn_printf(cn, "%d",
					__get_dumpable(cprm->mm_flags));
				break;

			case 's':
				err = cn_printf(cn, "%d", cprm->siginfo->si_signo);
				break;

			case 't': {
				struct timeval tv;
				do_gettimeofday(&tv);
				err = cn_printf(cn, "%lu", tv.tv_sec);
				break;
			}

			case 'h': {
				char *namestart = cn->corename + cn->used;
				down_read(&uts_sem);
				err = cn_printf(cn, "%s",
					      utsname()->nodename);
				up_read(&uts_sem);
				cn_escape(namestart);
				break;
			}

			case 'e': {
				char *commstart = cn->corename + cn->used;
				err = cn_printf(cn, "%s", current->comm);
				cn_escape(commstart);
				break;
			}
			case 'E':
				err = cn_print_exe_file(cn);
				break;

			case 'c':
				err = cn_printf(cn, "%lu",
					      rlimit(RLIMIT_CORE));
				break;
			default:
				break;
			}
			++pat_ptr;
		}

		if (err)
			return err;
	}

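	/*
	 * Backward compatibility with core_uses_pid: if core_pattern did not
	 * contain %p and core_uses_pid is set, append ".<pid>". Never do this
	 * for piped dumps.
	 */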
	if (!ispipe && !pid_in_pattern && core_uses_pid) {
		err = cn_printf(cn, ".%d", task_tgid_vnr(current));
		if (err)
			return err;
	}
out:
	return ispipe;
}

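/*
 * Mark the thread group as exiting with the given exit code and send
 * SIGKILL to every other thread that still owns an mm; returns how many
 * threads were woken.
 */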
static int zap_process(struct task_struct *start, int exit_code)
{
	struct task_struct *t;
	int nr = 0;

	start->signal->flags = SIGNAL_GROUP_EXIT;
	start->signal->group_exit_code = exit_code;
	start->signal->group_stop_count = 0;

	t = start;
	do {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		if (t != current && t->mm) {
			sigaddset(&t->pending.signal, SIGKILL);
			signal_wake_up(t, 1);
			nr++;
		}
	} while_each_thread(start, t);

	return nr;
}

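/*
 * Kill all tasks sharing tsk's mm and record how many dump waiters the
 * dumper must wait for in core_state->nr_threads.
 */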
static inline int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
			struct core_state *core_state, int exit_code)
{
	struct task_struct *g, *p;
	unsigned long flags;
	int nr = -EAGAIN;

	spin_lock_irq(&tsk->sighand->siglock);
	if (!signal_group_exit(tsk->signal)) {
		mm->core_state = core_state;
		nr = zap_process(tsk, exit_code);
	}
	spin_unlock_irq(&tsk->sighand->siglock);
	if (unlikely(nr < 0))
		return nr;

	if (atomic_read(&mm->mm_users) == nr + 1)
		goto done;
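	/*
	 * The mm may also be shared by other thread groups (CLONE_VM without
	 * CLONE_THREAD); walk every process and zap any group that still
	 * uses it.
	 */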
	rcu_read_lock();
	for_each_process(g) {
		if (g == tsk->group_leader)
			continue;
		if (g->flags & PF_KTHREAD)
			continue;
		p = g;
		do {
			if (p->mm) {
				if (unlikely(p->mm == mm)) {
					lock_task_sighand(p, &flags);
					nr += zap_process(p, exit_code);
					unlock_task_sighand(p, &flags);
				}
				break;
			}
		} while_each_thread(g, p);
	}
	rcu_read_unlock();
done:
	atomic_set(&core_state->nr_threads, nr);
	return nr;
}

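/*
 * Stop every other user of current->mm and wait for each of them to
 * check in with the dumper; returns the number of waiters, or a negative
 * errno if a dump is already in progress.
 */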
static int coredump_wait(int exit_code, struct core_state *core_state)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int core_waiters = -EBUSY;

	init_completion(&core_state->startup);
	core_state->dumper.task = tsk;
	core_state->dumper.next = NULL;

	down_write(&mm->mmap_sem);
	if (!mm->core_state)
		core_waiters = zap_threads(tsk, mm, core_state, exit_code);
	up_write(&mm->mmap_sem);

	if (core_waiters > 0) {
		struct core_thread *ptr;

		wait_for_completion(&core_state->startup);
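		/*
		 * Wait for each listed thread to actually stop running so
		 * its register state is stable before the dump starts.
		 */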
		ptr = core_state->dumper.next;
		while (ptr != NULL) {
			wait_task_inactive(ptr->task, 0);
			ptr = ptr->next;
		}
	}

	return core_waiters;
}

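/*
 * Release every thread that blocked in exit_mm() while the dump was in
 * progress and clear mm->core_state.
 */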
static void coredump_finish(struct mm_struct *mm)
{
	struct core_thread *curr, *next;
	struct task_struct *task;

	next = mm->core_state->dumper.next;
	while ((curr = next) != NULL) {
		next = curr->next;
		task = curr->task;
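		/*
		 * Read curr->next before clearing curr->task: once ->task is
		 * NULL the woken thread may free this entry.
		 */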
		smp_mb();
		curr->task = NULL;
		wake_up_process(task);
	}

	mm->core_state = NULL;
}

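/*
 * Wait for the usermode helper to finish reading a piped dump: count
 * ourselves as a reader instead of a writer and sleep until the helper
 * closes its end of the pipe or a signal arrives.
 */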
static void wait_for_dump_helpers(struct file *file)
{
	struct pipe_inode_info *pipe;

	pipe = file->f_path.dentry->d_inode->i_pipe;

	pipe_lock(pipe);
	pipe->readers++;
	pipe->writers--;

	while ((pipe->readers > 1) && (!signal_pending(current))) {
		wake_up_interruptible_sync(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
		pipe_wait(pipe);
	}

	pipe->readers--;
	pipe->writers++;
	pipe_unlock(pipe);
}

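/*
 * umh_pipe_setup - prepare the usermode helper that receives a piped core
 * dump: create a pipe, hand the write side to the dumper via cp->file,
 * install the read side as the helper's stdin, and set the helper's
 * RLIMIT_CORE to the special value 1 so the helper cannot itself recurse
 * into a piped core dump.
 */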
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
	struct file *files[2];
	struct coredump_params *cp = (struct coredump_params *)info->data;
	int err = create_pipe_files(files, 0);
	if (err)
		return err;

	cp->file = files[1];

	err = replace_fd(0, files[0], 0);
	fput(files[0]);

	current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

	return err;
}
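
/*
 * Entry point from the signal code: write a core dump for the current
 * process, either to a file named by core_pattern or to a usermode
 * helper when the pattern starts with '|'.
 */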
void do_coredump(siginfo_t *siginfo)
{
	struct core_state core_state;
	struct core_name cn;
	struct mm_struct *mm = current->mm;
	struct linux_binfmt * binfmt;
	const struct cred *old_cred;
	struct cred *cred;
	int retval = 0;
	int flag = 0;
	int ispipe;
	struct files_struct *displaced;
	bool need_nonrelative = false;
	static atomic_t core_dump_count = ATOMIC_INIT(0);
	struct coredump_params cprm = {
		.siginfo = siginfo,
		.regs = signal_pt_regs(),
		.limit = rlimit(RLIMIT_CORE),
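		/*
		 * Snapshot mm->flags once: it is not protected by a lock, and
		 * the dump must see one consistent value throughout.
		 */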
		.mm_flags = mm->flags,
	};

	audit_core_dumps(siginfo->si_signo);

	binfmt = mm->binfmt;
	if (!binfmt || !binfmt->core_dump)
		goto fail;
	if (!__get_dumpable(cprm.mm_flags))
		goto fail;

	cred = prepare_creds();
	if (!cred)
		goto fail;

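	/*
	 * For SUID_DUMPABLE_SAFE the fsuid cannot be trusted, so dump as
	 * root, refuse to overwrite an existing file, and later insist on an
	 * absolute core_pattern path.
	 */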
	if (__get_dumpable(cprm.mm_flags) == SUID_DUMPABLE_SAFE) {
		flag = O_EXCL;
		cred->fsuid = GLOBAL_ROOT_UID;
		need_nonrelative = true;
	}

	retval = coredump_wait(siginfo->si_signo, &core_state);
	if (retval < 0)
		goto fail_creds;

	old_cred = override_creds(cred);

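	/*
	 * Clear any stale pending-signal indication so filesystem code called
	 * while writing the core file is not spuriously interrupted.
	 */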
	clear_thread_flag(TIF_SIGPENDING);

	ispipe = format_corename(&cn, &cprm);

	if (ispipe) {
		int dump_count;
		char **helper_argv;

		if (ispipe < 0) {
			printk(KERN_WARNING "format_corename failed\n");
			printk(KERN_WARNING "Aborting core\n");
			goto fail_corename;
		}

		if (cprm.limit == 1) {
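			/*
			 * RLIMIT_CORE == 1 is the marker umh_pipe_setup()
			 * puts on the dump helper itself; bail out here so a
			 * crashing helper cannot recurse into another dump.
			 */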
			printk(KERN_WARNING
				"Process %d(%s) has RLIMIT_CORE set to 1\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Aborting core\n");
			goto fail_unlock;
		}
		cprm.limit = RLIM_INFINITY;

		dump_count = atomic_inc_return(&core_dump_count);
		if (core_pipe_limit && (core_pipe_limit < dump_count)) {
			printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
			       task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_dropcount;
		}

		helper_argv = argv_split(GFP_KERNEL, cn.corename+1, NULL);
		if (!helper_argv) {
			printk(KERN_WARNING "%s failed to allocate memory\n",
			       __func__);
			goto fail_dropcount;
		}

		retval = call_usermodehelper_fns(helper_argv[0], helper_argv,
					NULL, UMH_WAIT_EXEC, umh_pipe_setup,
					NULL, &cprm);
		argv_free(helper_argv);
		if (retval) {
			printk(KERN_INFO "Core dump to %s pipe failed\n",
			       cn.corename);
			goto close_fail;
		}
	} else {
		struct inode *inode;

		if (cprm.limit < binfmt->min_coredump)
			goto fail_unlock;

		if (need_nonrelative && cn.corename[0] != '/') {
			printk(KERN_WARNING "Pid %d(%s) can only dump core "
				"to fully qualified path!\n",
				task_tgid_vnr(current), current->comm);
			printk(KERN_WARNING "Skipping core dump\n");
			goto fail_unlock;
		}

		/* the literal 2 is O_RDWR; flag may add O_EXCL for setuid dumps */
		cprm.file = filp_open(cn.corename,
				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
				 0600);
		if (IS_ERR(cprm.file))
			goto fail_unlock;

		inode = cprm.file->f_path.dentry->d_inode;
		if (inode->i_nlink > 1)
			goto close_fail;
		if (d_unhashed(cprm.file->f_path.dentry))
			goto close_fail;

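		/* Only dump to a regular file. */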
		if (!S_ISREG(inode->i_mode))
			goto close_fail;

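		/*
		 * Refuse to dump over a file we do not own, so a local user
		 * cannot trick another user into writing a core into a
		 * pre-created file.
		 */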
		if (!uid_eq(inode->i_uid, current_fsuid()))
			goto close_fail;
		if (!cprm.file->f_op || !cprm.file->f_op->write)
			goto close_fail;
		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
			goto close_fail;
	}

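	/* Use an unshared descriptor table while dumping; almost always a no-op. */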
	retval = unshare_files(&displaced);
	if (retval)
		goto close_fail;
	if (displaced)
		put_files_struct(displaced);
	retval = binfmt->core_dump(&cprm);
	if (retval)
		current->signal->group_exit_code |= 0x80;

	if (ispipe && core_pipe_limit)
		wait_for_dump_helpers(cprm.file);
close_fail:
	if (cprm.file)
		filp_close(cprm.file, NULL);
fail_dropcount:
	if (ispipe)
		atomic_dec(&core_dump_count);
fail_unlock:
	kfree(cn.corename);
fail_corename:
	coredump_finish(mm);
	revert_creds(old_cred);
fail_creds:
	put_cred(cred);
fail:
	return;
}

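/*
 * Core dump write helpers: binfmt core_dump() implementations should use
 * only these to emit data and to skip over holes.
 */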
int dump_write(struct file *file, const void *addr, int nr)
{
	return access_ok(VERIFY_READ, addr, nr) &&
		file->f_op->write(file, addr, nr, &file->f_pos) == nr;
}
EXPORT_SYMBOL(dump_write);

int dump_seek(struct file *file, loff_t off)
{
	int ret = 1;

	if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
		if (file->f_op->llseek(file, off, SEEK_CUR) < 0)
			return 0;
	} else {
		char *buf = (char *)get_zeroed_page(GFP_KERNEL);

		if (!buf)
			return 0;
		while (off > 0) {
			unsigned long n = off;

			if (n > PAGE_SIZE)
				n = PAGE_SIZE;
			if (!dump_write(file, buf, n)) {
				ret = 0;
				break;
			}
			off -= n;
		}
		free_page((unsigned long)buf);
	}
	return ret;
}
EXPORT_SYMBOL(dump_seek);