linux/arch/arm64/kernel/process.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Based on arch/arm/kernel/process.c
   4 *
   5 * Original Copyright (C) 1995  Linus Torvalds
   6 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
   7 * Copyright (C) 2012 ARM Ltd.
   8 */
   9
  10#include <stdarg.h>
  11
  12#include <linux/compat.h>
  13#include <linux/efi.h>
  14#include <linux/elf.h>
  15#include <linux/export.h>
  16#include <linux/sched.h>
  17#include <linux/sched/debug.h>
  18#include <linux/sched/task.h>
  19#include <linux/sched/task_stack.h>
  20#include <linux/kernel.h>
  21#include <linux/mman.h>
  22#include <linux/mm.h>
  23#include <linux/nospec.h>
  24#include <linux/stddef.h>
  25#include <linux/sysctl.h>
  26#include <linux/unistd.h>
  27#include <linux/user.h>
  28#include <linux/delay.h>
  29#include <linux/reboot.h>
  30#include <linux/interrupt.h>
  31#include <linux/init.h>
  32#include <linux/cpu.h>
  33#include <linux/elfcore.h>
  34#include <linux/pm.h>
  35#include <linux/tick.h>
  36#include <linux/utsname.h>
  37#include <linux/uaccess.h>
  38#include <linux/random.h>
  39#include <linux/hw_breakpoint.h>
  40#include <linux/personality.h>
  41#include <linux/notifier.h>
  42#include <trace/events/power.h>
  43#include <linux/percpu.h>
  44#include <linux/thread_info.h>
  45#include <linux/prctl.h>
  46
  47#include <asm/alternative.h>
  48#include <asm/compat.h>
  49#include <asm/cpufeature.h>
  50#include <asm/cacheflush.h>
  51#include <asm/exec.h>
  52#include <asm/fpsimd.h>
  53#include <asm/mmu_context.h>
  54#include <asm/mte.h>
  55#include <asm/processor.h>
  56#include <asm/pointer_auth.h>
  57#include <asm/stacktrace.h>
  58#include <asm/switch_to.h>
  59#include <asm/system_misc.h>
  60
  61#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
  62#include <linux/stackprotector.h>
  63unsigned long __stack_chk_guard __read_mostly;
  64EXPORT_SYMBOL(__stack_chk_guard);
  65#endif
  66
  67/*
  68 * Function pointers to optional machine specific functions
  69 */
  70void (*pm_power_off)(void);
  71EXPORT_SYMBOL_GPL(pm_power_off);
  72
  73#ifdef CONFIG_HOTPLUG_CPU
  74void arch_cpu_idle_dead(void)
  75{
  76       cpu_die();
  77}
  78#endif
  79
  80/*
  81 * Called by kexec, immediately prior to machine_kexec().
  82 *
  83 * This must completely disable all secondary CPUs; simply causing those CPUs
  84 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
  85 * kexec'd kernel to use any and all RAM as it sees fit, without having to
  86 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
  87 * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve this.
  88 */
  89void machine_shutdown(void)
  90{
  91        smp_shutdown_nonboot_cpus(reboot_cpu);
  92}
  93
  94/*
  95 * Halting simply requires that the secondary CPUs stop performing any
  96 * activity (executing tasks, handling interrupts). smp_send_stop()
  97 * achieves this.
  98 */
  99void machine_halt(void)
 100{
 101        local_irq_disable();
 102        smp_send_stop();
 103        while (1);
 104}
 105
 106/*
 107 * Power-off simply requires that the secondary CPUs stop performing any
 108 * activity (executing tasks, handling interrupts). smp_send_stop()
 109 * achieves this. When the system power is turned off, it will take all CPUs
 110 * with it.
 111 */
 112void machine_power_off(void)
 113{
 114        local_irq_disable();
 115        smp_send_stop();
 116        if (pm_power_off)
 117                pm_power_off();
 118}
 119
 120/*
 121 * Restart requires that the secondary CPUs stop performing any activity
 122 * while the primary CPU resets the system. Systems with multiple CPUs must
 123 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 124 * This is required so that any code running after reset on the primary CPU
 125 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 126 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 127 * to use. Implementing such co-ordination would be essentially impossible.
 128 */
 129void machine_restart(char *cmd)
 130{
 131        /* Disable interrupts first */
 132        local_irq_disable();
 133        smp_send_stop();
 134
 135        /*
 136         * UpdateCapsule() depends on the system being reset via
 137         * ResetSystem().
 138         */
 139        if (efi_enabled(EFI_RUNTIME_SERVICES))
 140                efi_reboot(reboot_mode, NULL);
 141
 142        /* Now call the architecture specific reboot code. */
 143        do_kernel_restart(cmd);
 144
 145        /*
 146         * Whoops - the architecture was unable to reboot.
 147         */
 148        printk("Reboot failed -- System halted\n");
 149        while (1);
 150}
 151
 152#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
 153static const char *const btypes[] = {
 154        bstr(NONE, "--"),
 155        bstr(  JC, "jc"),
 156        bstr(   C, "-c"),
 157        bstr(  J , "j-")
 158};
 159#undef bstr
 160
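    /*
     * Decode and print the saved PSTATE, using the AArch32 CPSR flag
     * layout for compat tasks and the native PSTATE layout otherwise.
     */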
 161static void print_pstate(struct pt_regs *regs)
 162{
 163        u64 pstate = regs->pstate;
 164
 165        if (compat_user_mode(regs)) {
 166                printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
 167                        pstate,
 168                        pstate & PSR_AA32_N_BIT ? 'N' : 'n',
 169                        pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
 170                        pstate & PSR_AA32_C_BIT ? 'C' : 'c',
 171                        pstate & PSR_AA32_V_BIT ? 'V' : 'v',
 172                        pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
 173                        pstate & PSR_AA32_T_BIT ? "T32" : "A32",
 174                        pstate & PSR_AA32_E_BIT ? "BE" : "LE",
 175                        pstate & PSR_AA32_A_BIT ? 'A' : 'a',
 176                        pstate & PSR_AA32_I_BIT ? 'I' : 'i',
 177                        pstate & PSR_AA32_F_BIT ? 'F' : 'f');
 178        } else {
 179                const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
 180                                               PSR_BTYPE_SHIFT];
 181
 182                printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO %cTCO BTYPE=%s)\n",
 183                        pstate,
 184                        pstate & PSR_N_BIT ? 'N' : 'n',
 185                        pstate & PSR_Z_BIT ? 'Z' : 'z',
 186                        pstate & PSR_C_BIT ? 'C' : 'c',
 187                        pstate & PSR_V_BIT ? 'V' : 'v',
 188                        pstate & PSR_D_BIT ? 'D' : 'd',
 189                        pstate & PSR_A_BIT ? 'A' : 'a',
 190                        pstate & PSR_I_BIT ? 'I' : 'i',
 191                        pstate & PSR_F_BIT ? 'F' : 'f',
 192                        pstate & PSR_PAN_BIT ? '+' : '-',
 193                        pstate & PSR_UAO_BIT ? '+' : '-',
 194                        pstate & PSR_TCO_BIT ? '+' : '-',
 195                        btype_str);
 196        }
 197}
 198
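    /*
     * Dump PSTATE, pc, lr, sp and the general-purpose registers. Compat
     * (AArch32) tasks only have r0-r12 plus sp/lr, so only those are
     * printed for them.
     */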
 199void __show_regs(struct pt_regs *regs)
 200{
 201        int i, top_reg;
 202        u64 lr, sp;
 203
 204        if (compat_user_mode(regs)) {
 205                lr = regs->compat_lr;
 206                sp = regs->compat_sp;
 207                top_reg = 12;
 208        } else {
 209                lr = regs->regs[30];
 210                sp = regs->sp;
 211                top_reg = 29;
 212        }
 213
 214        show_regs_print_info(KERN_DEFAULT);
 215        print_pstate(regs);
 216
 217        if (!user_mode(regs)) {
 218                printk("pc : %pS\n", (void *)regs->pc);
 219                printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
 220        } else {
 221                printk("pc : %016llx\n", regs->pc);
 222                printk("lr : %016llx\n", lr);
 223        }
 224
 225        printk("sp : %016llx\n", sp);
 226
 227        if (system_uses_irq_prio_masking())
 228                printk("pmr_save: %08llx\n", regs->pmr_save);
 229
 230        i = top_reg;
 231
 232        while (i >= 0) {
 233                printk("x%-2d: %016llx", i, regs->regs[i]);
 234
 235                while (i-- % 3)
 236                        pr_cont(" x%-2d: %016llx", i, regs->regs[i]);
 237
 238                pr_cont("\n");
 239        }
 240}
 241
 242void show_regs(struct pt_regs *regs)
 243{
 244        __show_regs(regs);
 245        dump_backtrace(regs, NULL, KERN_DEFAULT);
 246}
 247
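    /*
     * Reset the TLS registers on exec: zero tpidr_el0 and, for compat
     * tasks, the shadow tp_value and tpidrro_el0 as well.
     */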
 248static void tls_thread_flush(void)
 249{
 250        write_sysreg(0, tpidr_el0);
 251
 252        if (is_compat_task()) {
 253                current->thread.uw.tp_value = 0;
 254
 255                /*
 256                 * We need to ensure ordering between the shadow state and the
 257                 * hardware state, so that we don't corrupt the hardware state
 258                 * with a stale shadow state during context switch.
 259                 */
 260                barrier();
 261                write_sysreg(0, tpidrro_el0);
 262        }
 263}
 264
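    /* Opt the task back out of the tagged address ABI, e.g. across exec. */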
 265static void flush_tagged_addr_state(void)
 266{
 267        if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
 268                clear_thread_flag(TIF_TAGGED_ADDR);
 269}
 270
 271void flush_thread(void)
 272{
 273        fpsimd_flush_thread();
 274        tls_thread_flush();
 275        flush_ptrace_hw_breakpoint(current);
 276        flush_tagged_addr_state();
 277}
 278
 279void release_thread(struct task_struct *dead_task)
 280{
 281}
 282
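    /* Free any SVE state buffer still attached to the task being released. */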
 283void arch_release_task_struct(struct task_struct *tsk)
 284{
 285        fpsimd_release_task(tsk);
 286}
 287
 288int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 289{
 290        if (current->mm)
 291                fpsimd_preserve_current_state();
 292        *dst = *src;
 293
 294        /* We rely on the above assignment to initialize dst's thread_flags: */
 295        BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
 296
 297        /*
 298         * Detach src's sve_state (if any) from dst so that it does not
 299         * get erroneously used or freed prematurely.  dst's sve_state
 300         * will be allocated on demand later on if dst uses SVE.
 301         * For consistency, also clear TIF_SVE here: this could be done
 302         * later in copy_process(), but to avoid tripping up future
 303         * maintainers it is best not to leave TIF_SVE and sve_state in
 304         * an inconsistent state, even temporarily.
 305         */
 306        dst->thread.sve_state = NULL;
 307        clear_tsk_thread_flag(dst, TIF_SVE);
 308
 309        /* clear any pending asynchronous tag fault raised by the parent */
 310        clear_tsk_thread_flag(dst, TIF_MTE_ASYNC_FAULT);
 311
 312        return 0;
 313}
 314
 315asmlinkage void ret_from_fork(void) asm("ret_from_fork");
 316
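    /*
     * Set up the child's pt_regs and saved cpu_context so that it starts
     * life in ret_from_fork.
     */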
 317int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 318                unsigned long stk_sz, struct task_struct *p, unsigned long tls)
 319{
 320        struct pt_regs *childregs = task_pt_regs(p);
 321
 322        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 323
 324        /*
 325         * In case p was allocated the same task_struct pointer as some
 326         * other recently-exited task, make sure p is disassociated from
 327         * any cpu that may have run that now-exited task recently.
 328         * Otherwise we could erroneously skip reloading the FPSIMD
 329         * registers for p.
 330         */
 331        fpsimd_flush_task_state(p);
 332
 333        ptrauth_thread_init_kernel(p);
 334
 335        if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
 336                *childregs = *current_pt_regs();
 337                childregs->regs[0] = 0;
 338
 339                /*
 340                 * Read the current TLS pointer from tpidr_el0 as it may be
 341                 * out-of-sync with the saved value.
 342                 */
 343                *task_user_tls(p) = read_sysreg(tpidr_el0);
 344
 345                if (stack_start) {
 346                        if (is_compat_thread(task_thread_info(p)))
 347                                childregs->compat_sp = stack_start;
 348                        else
 349                                childregs->sp = stack_start;
 350                }
 351
 352                /*
 353                 * If a TLS pointer was passed to clone, use it for the new
 354                 * thread.
 355                 */
 356                if (clone_flags & CLONE_SETTLS)
 357                        p->thread.uw.tp_value = tls;
 358        } else {
 359                /*
 360                 * A kthread has no context to ERET to, so ensure any buggy
 361                 * ERET is treated as an illegal exception return.
 362                 *
 363                 * When a user task is created from a kthread, childregs will
 364                 * be initialized by start_thread() or start_compat_thread().
 365                 */
 366                memset(childregs, 0, sizeof(struct pt_regs));
 367                childregs->pstate = PSR_MODE_EL1h | PSR_IL_BIT;
 368
 369                p->thread.cpu_context.x19 = stack_start;
 370                p->thread.cpu_context.x20 = stk_sz;
 371        }
 372        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
 373        p->thread.cpu_context.sp = (unsigned long)childregs;
 374        /*
 375         * For the benefit of the unwinder, set up childregs->stackframe
 376         * as the final frame for the new task.
 377         */
 378        p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
 379
 380        ptrace_hw_copy_thread(p);
 381
 382        return 0;
 383}
 384
 385void tls_preserve_current_state(void)
 386{
 387        *task_user_tls(current) = read_sysreg(tpidr_el0);
 388}
 389
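    /*
     * Switch TLS registers: preserve the outgoing task's tpidr_el0, point
     * tpidrro_el0 at the incoming compat task's TLS value (zeroing it for
     * native tasks when the kernel is mapped at EL0), then load the
     * incoming task's tpidr_el0.
     */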
 390static void tls_thread_switch(struct task_struct *next)
 391{
 392        tls_preserve_current_state();
 393
 394        if (is_compat_thread(task_thread_info(next)))
 395                write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
 396        else if (!arm64_kernel_unmapped_at_el0())
 397                write_sysreg(0, tpidrro_el0);
 398
 399        write_sysreg(*task_user_tls(next), tpidr_el0);
 400}
 401
 402/*
 403 * Force SSBS state on context-switch, since it may be lost after migrating
 404 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 405 */
 406static void ssbs_thread_switch(struct task_struct *next)
 407{
 408        /*
 409         * Nothing to do for kernel threads, but 'regs' may be junk
 410         * (e.g. idle task) so check the flags and bail early.
 411         */
 412        if (unlikely(next->flags & PF_KTHREAD))
 413                return;
 414
 415        /*
 416         * If all CPUs implement the SSBS extension, then we just need to
 417         * context-switch the PSTATE field.
 418         */
 419        if (cpus_have_const_cap(ARM64_SSBS))
 420                return;
 421
 422        spectre_v4_enable_task_mitigation(next);
 423}
 424
 425/*
 426 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 427 * shadow copy so that we can restore this upon entry from userspace.
 428 *
 429 * This is *only* for exception entry from EL0, and is not valid until we
 430 * __switch_to() a user task.
 431 */
 432DEFINE_PER_CPU(struct task_struct *, __entry_task);
 433
 434static void entry_task_switch(struct task_struct *next)
 435{
 436        __this_cpu_write(__entry_task, next);
 437}
 438
 439/*
 440 * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
 441 * Assuming the virtual counter is enabled from boot:
 442 *
 443 * - disable access when switching from a 64bit task to a 32bit task
 444 * - enable access when switching from a 32bit task to a 64bit task
 445 */
 446static void erratum_1418040_thread_switch(struct task_struct *prev,
 447                                          struct task_struct *next)
 448{
 449        bool prev32, next32;
 450        u64 val;
 451
 452        if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
 453                return;
 454
 455        prev32 = is_compat_thread(task_thread_info(prev));
 456        next32 = is_compat_thread(task_thread_info(next));
 457
 458        if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
 459                return;
 460
 461        val = read_sysreg(cntkctl_el1);
 462
 463        if (!next32)
 464                val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
 465        else
 466                val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
 467
 468        write_sysreg(val, cntkctl_el1);
 469}
 470
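    /*
     * On systems where only some CPUs support 32-bit EL0, flag incoming
     * compat tasks with TIF_NOTIFY_RESUME so they are re-examined on the
     * return to userspace.
     */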
 471static void compat_thread_switch(struct task_struct *next)
 472{
 473        if (!is_compat_thread(task_thread_info(next)))
 474                return;
 475
 476        if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
 477                set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
 478}
 479
 480static void update_sctlr_el1(u64 sctlr)
 481{
 482        /*
 483         * EnIA must not be cleared while in the kernel as this is necessary for
 484         * in-kernel PAC. It will be cleared on kernel exit if needed.
 485         */
 486        sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK & ~SCTLR_ELx_ENIA, sctlr);
 487
 488        /* ISB required for the kernel uaccess routines when setting TCF0. */
 489        isb();
 490}
 491
 492void set_task_sctlr_el1(u64 sctlr)
 493{
 494        /*
 495         * __switch_to() checks current->thread.sctlr_user as an
 496         * optimisation. Disable preemption so that it does not see
 497         * the variable update before the SCTLR_EL1 one.
 498         */
 499        preempt_disable();
 500        current->thread.sctlr_user = sctlr;
 501        update_sctlr_el1(sctlr);
 502        preempt_enable();
 503}
 504
 505/*
 506 * Thread switching.
 507 */
 508__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 509                                struct task_struct *next)
 510{
 511        struct task_struct *last;
 512
 513        fpsimd_thread_switch(next);
 514        tls_thread_switch(next);
 515        hw_breakpoint_thread_switch(next);
 516        contextidr_thread_switch(next);
 517        entry_task_switch(next);
 518        ssbs_thread_switch(next);
 519        erratum_1418040_thread_switch(prev, next);
 520        ptrauth_thread_switch_user(next);
 521        compat_thread_switch(next);
 522
 523        /*
 524         * Complete any pending TLB or cache maintenance on this CPU in case
 525         * the thread migrates to a different CPU.
 526         * This full barrier is also required by the membarrier system
 527         * call.
 528         */
 529        dsb(ish);
 530
 531        /*
 532         * MTE thread switching must happen after the DSB above to ensure that
 533         * any asynchronous tag check faults have been logged in the TFSR*_EL1
 534         * registers.
 535         */
 536        mte_thread_switch(next);
 537        /* avoid expensive SCTLR_EL1 accesses if no change */
 538        if (prev->thread.sctlr_user != next->thread.sctlr_user)
 539                update_sctlr_el1(next->thread.sctlr_user);
 540
 541        /* the actual thread switch */
 542        last = cpu_switch_to(prev, next);
 543
 544        return last;
 545}
 546
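    /*
     * Walk a blocked task's saved call stack and return the first PC found
     * outside the scheduler (used for /proc/<pid>/wchan), giving up after
     * 16 frames.
     */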
 547unsigned long get_wchan(struct task_struct *p)
 548{
 549        struct stackframe frame;
 550        unsigned long stack_page, ret = 0;
 551        int count = 0;
 552        if (!p || p == current || task_is_running(p))
 553                return 0;
 554
 555        stack_page = (unsigned long)try_get_task_stack(p);
 556        if (!stack_page)
 557                return 0;
 558
 559        start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
 560
 561        do {
 562                if (unwind_frame(p, &frame))
 563                        goto out;
 564                if (!in_sched_functions(frame.pc)) {
 565                        ret = frame.pc;
 566                        goto out;
 567                }
 568        } while (count++ < 16);
 569
 570out:
 571        put_task_stack(p);
 572        return ret;
 573}
 574
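    /*
     * Randomise the initial stack pointer within a page, unless address
     * space randomisation is disabled, and keep it 16-byte aligned.
     */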
 575unsigned long arch_align_stack(unsigned long sp)
 576{
 577        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
 578                sp -= get_random_int() & ~PAGE_MASK;
 579        return sp & ~0xf;
 580}
 581
 582/*
 583 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 584 */
 585void arch_setup_new_exec(void)
 586{
 587        unsigned long mmflags = 0;
 588
 589        if (is_compat_task()) {
 590                mmflags = MMCF_AARCH32;
 591                if (static_branch_unlikely(&arm64_mismatched_32bit_el0))
 592                        set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
 593        }
 594
 595        current->mm->context.flags = mmflags;
 596        ptrauth_thread_init_user();
 597        mte_thread_init_user();
 598
 599        if (task_spec_ssb_noexec(current)) {
 600                arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
 601                                         PR_SPEC_ENABLE);
 602        }
 603}
 604
 605#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
 606/*
 607 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 608 */
 609static unsigned int tagged_addr_disabled;
 610
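    /*
     * Backend for the PR_SET_TAGGED_ADDR_CTRL prctl: update the task's
     * tagged address ABI and MTE controls.
     */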
 611long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
 612{
 613        unsigned long valid_mask = PR_TAGGED_ADDR_ENABLE;
 614        struct thread_info *ti = task_thread_info(task);
 615
 616        if (is_compat_thread(ti))
 617                return -EINVAL;
 618
 619        if (system_supports_mte())
 620                valid_mask |= PR_MTE_TCF_MASK | PR_MTE_TAG_MASK;
 621
 622        if (arg & ~valid_mask)
 623                return -EINVAL;
 624
 625        /*
 626         * Do not allow the enabling of the tagged address ABI if globally
 627         * disabled via sysctl abi.tagged_addr_disabled.
 628         */
 629        if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
 630                return -EINVAL;
 631
 632        if (set_mte_ctrl(task, arg) != 0)
 633                return -EINVAL;
 634
 635        update_ti_thread_flag(ti, TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);
 636
 637        return 0;
 638}
 639
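    /* Backend for the PR_GET_TAGGED_ADDR_CTRL prctl. */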
 640long get_tagged_addr_ctrl(struct task_struct *task)
 641{
 642        long ret = 0;
 643        struct thread_info *ti = task_thread_info(task);
 644
 645        if (is_compat_thread(ti))
 646                return -EINVAL;
 647
 648        if (test_ti_thread_flag(ti, TIF_TAGGED_ADDR))
 649                ret = PR_TAGGED_ADDR_ENABLE;
 650
 651        ret |= get_mte_ctrl(task);
 652
 653        return ret;
 654}
 655
 656/*
 657 * Global sysctl to disable tagged user address support. This control only
 658 * prevents the tagged address ABI from being enabled via prctl() and does not
 659 * disable it for tasks that already opted in to the relaxed ABI.
 660 */
 661
 662static struct ctl_table tagged_addr_sysctl_table[] = {
 663        {
 664                .procname       = "tagged_addr_disabled",
 665                .mode           = 0644,
 666                .data           = &tagged_addr_disabled,
 667                .maxlen         = sizeof(int),
 668                .proc_handler   = proc_dointvec_minmax,
 669                .extra1         = SYSCTL_ZERO,
 670                .extra2         = SYSCTL_ONE,
 671        },
 672        { }
 673};
 674
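    /* Register the abi.tagged_addr_disabled sysctl at boot. */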
 675static int __init tagged_addr_init(void)
 676{
 677        if (!register_sysctl("abi", tagged_addr_sysctl_table))
 678                return -EINVAL;
 679        return 0;
 680}
 681
 682core_initcall(tagged_addr_init);
 683#endif  /* CONFIG_ARM64_TAGGED_ADDR_ABI */
 684
 685#ifdef CONFIG_BINFMT_ELF
 686int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
 687                         bool has_interp, bool is_interp)
 688{
 689        /*
 690         * For dynamically linked executables the interpreter is
 691         * responsible for setting PROT_BTI on everything except
 692         * itself.
 693         */
 694        if (is_interp != has_interp)
 695                return prot;
 696
 697        if (!(state->flags & ARM64_ELF_BTI))
 698                return prot;
 699
 700        if (prot & PROT_EXEC)
 701                prot |= PROT_BTI;
 702
 703        return prot;
 704}
 705#endif
 706