linux/arch/arm/mach-ep93xx/crunch.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/mach-ep93xx/crunch.c
 * Cirrus MaverickCrunch context switching and handling
 *
 * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>

#include <asm/thread_notify.h>

#include "soc.h"

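/* Crunch state of the task whose registers are live in the coprocessor, or NULL. */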
struct crunch_state *crunch_owner;

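/*
 * Called when a thread exits or is flushed: drop our claim on the lazily
 * switched coprocessor state so nothing is saved back into a thread_info
 * that is going away.
 */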
void crunch_task_release(struct thread_info *thread)
{
	local_irq_disable();
	if (crunch_owner == &thread->crunchstate)
		crunch_owner = NULL;
	local_irq_enable();
}

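/* True if the MaverickCrunch coprocessor is currently enabled in DEVCFG. */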
static int crunch_enabled(u32 devcfg)
{
	return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA);
}

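/*
 * Thread notifier callback.  FLUSH clears the saved Crunch state (and, via
 * the fallthrough, drops ownership), EXIT drops ownership, and SWITCH flips
 * the coprocessor enable bit so that only the task owning the lazily
 * switched state can use the coprocessor without trapping.
 */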
static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = (struct thread_info *)t;
	struct crunch_state *crunch_state;
	u32 devcfg;

	crunch_state = &thread->crunchstate;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		memset(crunch_state, 0, sizeof(*crunch_state));

		/*
		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
		 * initialised state information on the first fault.
		 */
		fallthrough;

	case THREAD_NOTIFY_EXIT:
		crunch_task_release(thread);
		break;

	case THREAD_NOTIFY_SWITCH:
		devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG);
		if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
			/*
			 * We don't use ep93xx_syscon_swlocked_write() here
			 * because we are on the context switch path and
			 * preemption is already disabled.
			 */
			devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA;
			__raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
			__raw_writel(devcfg, EP93XX_SYSCON_DEVCFG);
		}
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block crunch_notifier_block = {
	.notifier_call	= crunch_do,
};

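/*
 * Register for thread lifetime notifications and advertise MaverickCrunch
 * support to user space via the ELF hwcaps.
 */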
int __init crunch_init(void)
{
	thread_register_notifier(&crunch_notifier_block);
	elf_hwcap |= HWCAP_CRUNCH;

	return 0;
}