linux/arch/arm/include/asm/thread_info.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/thread_info.h
 *
 *  Copyright (C) 2002 Russell King.
 */
#ifndef __ASM_ARM_THREAD_INFO_H
#define __ASM_ARM_THREAD_INFO_H

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <asm/fpstate.h>
#include <asm/page.h>

#ifdef CONFIG_KASAN
/*
 * KASan uses a lot of extra stack space so the thread size order needs to
 * be increased.
 */
#define THREAD_SIZE_ORDER       2
#else
#define THREAD_SIZE_ORDER       1
#endif
#define THREAD_SIZE             (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_START_SP         (THREAD_SIZE - 8)

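/*
 * Worked example (illustrative, not part of the original header): with
 * 4 KiB pages and THREAD_SIZE_ORDER = 1, THREAD_SIZE is 4096 << 1 = 8 KiB
 * and THREAD_START_SP = 8192 - 8 = 8184 bytes above the stack base.  With
 * CONFIG_KASAN, THREAD_SIZE_ORDER = 2 gives a 16 KiB kernel stack instead.
 */
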
#ifndef __ASSEMBLY__

struct task_struct;

#include <asm/types.h>

typedef unsigned long mm_segment_t;

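/*
 * Illustrative sketch (not from this header): on kernels of this vintage,
 * mm_segment_t backs the legacy get_fs()/set_fs() address-limit interface
 * from <asm/uaccess.h>, typically used as:
 *
 *        mm_segment_t old_fs = get_fs();
 *
 *        set_fs(KERNEL_DS);
 *        ...access kernel addresses through the uaccess routines...
 *        set_fs(old_fs);
 */
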
struct cpu_context_save {
        __u32   r4;
        __u32   r5;
        __u32   r6;
        __u32   r7;
        __u32   r8;
        __u32   r9;
        __u32   sl;
        __u32   fp;
        __u32   sp;
        __u32   pc;
        __u32   extra[2];               /* Xscale 'acc' register, etc */
};

/*
 * low level task data that entry.S needs immediate access to.
 * __switch_to() assumes cpu_context follows immediately after cpu_domain.
 */
struct thread_info {
        unsigned long           flags;          /* low level flags */
        int                     preempt_count;  /* 0 => preemptable, <0 => bug */
        mm_segment_t            addr_limit;     /* address limit */
        struct task_struct      *task;          /* main task structure */
        __u32                   cpu;            /* cpu */
        __u32                   cpu_domain;     /* cpu domain */
#ifdef CONFIG_STACKPROTECTOR_PER_TASK
        unsigned long           stack_canary;
#endif
        struct cpu_context_save cpu_context;    /* cpu context */
        __u32                   syscall;        /* syscall number */
        __u8                    used_cp[16];    /* thread used copro */
        unsigned long           tp_value[2];    /* TLS registers */
#ifdef CONFIG_CRUNCH
        struct crunch_state     crunchstate;
#endif
        union fp_state          fpstate __attribute__((aligned(8)));
        union vfp_state         vfpstate;
#ifdef CONFIG_ARM_THUMBEE
        unsigned long           thumbee_state;  /* ThumbEE Handler Base register */
#endif
};

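/*
 * Illustrative sketch (not part of this header): thread_info lives at the
 * base of the task's kernel stack here, so other kernel code reaches these
 * fields through the generic task_thread_info() helper, e.g.:
 *
 *        struct thread_info *ti = task_thread_info(p);
 *        unsigned int cpu       = ti->cpu;
 *        unsigned long tls      = ti->tp_value[0];
 */
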
#define INIT_THREAD_INFO(tsk)                                           \
{                                                                       \
        .task           = &tsk,                                         \
        .flags          = 0,                                            \
        .preempt_count  = INIT_PREEMPT_COUNT,                           \
        .addr_limit     = KERNEL_DS,                                    \
}

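/*
 * Sketch of how the core kernel consumes INIT_THREAD_INFO() (illustrative
 * only; the exact declaration and section attributes live in
 * init/init_task.c):
 *
 *        struct thread_info init_thread_info = INIT_THREAD_INFO(init_task);
 */
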
/*
 * how to get the thread information struct from C
 */
static inline struct thread_info *current_thread_info(void) __attribute_const__;

static inline struct thread_info *current_thread_info(void)
{
        return (struct thread_info *)
                (current_stack_pointer & ~(THREAD_SIZE - 1));
}

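/*
 * Worked example (illustrative): with an 8 KiB stack, THREAD_SIZE - 1 is
 * 0x1fff, so a kernel stack pointer of, say, 0xc1235e60 is masked down to
 * 0xc1234000, the base of the current stack, where thread_info is stored.
 */
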
#define thread_saved_pc(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
#define thread_saved_sp(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.sp))

#ifndef CONFIG_THUMB2_KERNEL
#define thread_saved_fp(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
#else
#define thread_saved_fp(tsk)    \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
#endif

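/*
 * Illustrative sketch (not part of this header): these accessors give the
 * saved context of a task that is not currently running, which is what a
 * stack walker starts from, e.g.:
 *
 *        unsigned long fp = thread_saved_fp(p);
 *        unsigned long sp = thread_saved_sp(p);
 *        unsigned long pc = thread_saved_pc(p);
 *        ...seed the frame-pointer or EHABI unwinder with fp/sp/pc...
 */
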
extern void crunch_task_disable(struct thread_info *);
extern void crunch_task_copy(struct thread_info *, void *);
extern void crunch_task_restore(struct thread_info *, void *);
extern void crunch_task_release(struct thread_info *);

extern void iwmmxt_task_disable(struct thread_info *);
extern void iwmmxt_task_copy(struct thread_info *, void *);
extern void iwmmxt_task_restore(struct thread_info *, void *);
extern void iwmmxt_task_release(struct thread_info *);
extern void iwmmxt_task_switch(struct thread_info *);

extern void vfp_sync_hwstate(struct thread_info *);
extern void vfp_flush_hwstate(struct thread_info *);

struct user_vfp;
struct user_vfp_exc;

extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
                                           struct user_vfp_exc *);
extern int vfp_restore_user_hwstate(struct user_vfp *,
                                    struct user_vfp_exc *);
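
/*
 * Illustrative sketch (not part of this header): the signal code uses the
 * two prototypes above when building and restoring the VFP part of a user
 * signal frame, roughly:
 *
 *        struct user_vfp uvfp;
 *        struct user_vfp_exc uexc;
 *        int err;
 *
 *        err = vfp_preserve_user_clear_hwstate(&uvfp, &uexc);
 *        ...copy uvfp/uexc out to the user sigframe...
 *
 *        ...on sigreturn, copy them back in and then...
 *        err = vfp_restore_user_hwstate(&uvfp, &uexc);
 */
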
#endif

/*
 * thread information flags:
 *  TIF_USEDFPU         - FPU was used by this task this quantum (SMP)
 *  TIF_POLLING_NRFLAG  - true if poll_idle() is polling TIF_NEED_RESCHED
 *
 * Any bit in the range of 0..15 will cause do_work_pending() to be invoked.
 */
#define TIF_SIGPENDING          0       /* signal pending */
#define TIF_NEED_RESCHED        1       /* rescheduling necessary */
#define TIF_NOTIFY_RESUME       2       /* callback before returning to user */
#define TIF_UPROBE              3       /* breakpointed or singlestepping */
#define TIF_SYSCALL_TRACE       4       /* syscall trace active */
#define TIF_SYSCALL_AUDIT       5       /* syscall auditing active */
#define TIF_SYSCALL_TRACEPOINT  6       /* syscall tracepoint instrumentation */
#define TIF_SECCOMP             7       /* seccomp syscall filtering active */
#define TIF_NOTIFY_SIGNAL       8       /* signal notifications exist */

#define TIF_USING_IWMMXT        17
#define TIF_MEMDIE              18      /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK     20

#define _TIF_SIGPENDING         (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED       (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME      (1 << TIF_NOTIFY_RESUME)
#define _TIF_UPROBE             (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT      (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP            (1 << TIF_SECCOMP)
#define _TIF_NOTIFY_SIGNAL      (1 << TIF_NOTIFY_SIGNAL)
#define _TIF_USING_IWMMXT       (1 << TIF_USING_IWMMXT)

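/*
 * Illustrative sketch (not part of this header): the TIF_* bit numbers are
 * consumed through the generic thread-flag helpers, e.g.:
 *
 *        set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
 *        if (test_thread_flag(TIF_SIGPENDING))
 *                ...deliver the pending signal before returning to user...
 *        clear_thread_flag(TIF_NOTIFY_RESUME);
 */
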
/* Checks for any syscall work in entry-common.S */
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
                           _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)

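/*
 * Illustrative sketch (not part of this header): the C equivalent of the
 * entry-common.S test is roughly:
 *
 *        if (current_thread_info()->flags & _TIF_SYSCALL_WORK)
 *                ...take the slow path via syscall_trace_enter()/exit()...
 */
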
/*
 * Change these and you break ASM code in entry-common.S
 */
#define _TIF_WORK_MASK          (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
                                 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
                                 _TIF_NOTIFY_SIGNAL)

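/*
 * Illustrative sketch (not part of this header): on the return-to-user path
 * the assembly tests the flags word against _TIF_WORK_MASK and, if any bit
 * is set, calls do_work_pending(), which loops along these lines:
 *
 *        do {
 *                if (likely(thread_flags & _TIF_NEED_RESCHED))
 *                        schedule();
 *                else
 *                        ...handle signals and notify-resume work...
 *                local_irq_disable();
 *                thread_flags = current_thread_info()->flags;
 *        } while (thread_flags & _TIF_WORK_MASK);
 */
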
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_THREAD_INFO_H */