linux/arch/s390/boot/mem_detect.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include <asm/sparsemem.h>
#include "compressed/decompressor.h"
#include "boot.h"

struct mem_detect_info __bootdata(mem_detect);

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX						       \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))

/*
 * To avoid corrupting old kernel memory during dump, find the lowest memory
 * chunk possible, either right after the end of the decompressed kernel or
 * after the initrd (if it is present and there is no hole between the kernel
 * end and the initrd).
 */
static void *mem_detect_alloc_extended(void)
{
	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
	    INITRD_START < offset + ENTRIES_EXTENDED_MAX)
		offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));

	return (void *)offset;
}

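/*
 * The first MEM_INLINED_ENTRIES blocks are stored directly in mem_detect;
 * any further blocks go to the extended area, which is allocated lazily on
 * first use by mem_detect_alloc_extended().
 */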
static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &mem_detect.entries[n];
	if (unlikely(!mem_detect.entries_extended))
		mem_detect.entries_extended = mem_detect_alloc_extended();
	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * Sequential calls to add_mem_detect_block() with adjacent memory areas
 * are merged together into a single memory block.
 */
void add_mem_detect_block(u64 start, u64 end)
{
	struct mem_detect_block *block;

	if (mem_detect.count) {
		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
		if (block->end == start) {
			block->end = end;
			return;
		}
	}

	block = __get_mem_detect_block_ptr(mem_detect.count);
	block->start = start;
	block->end = end;
	mem_detect.count++;
}

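/*
 * Issue diagnose 0x260 with subcode 0x10 (storage configuration). The
 * program-check new PSW is temporarily redirected to label 1, so that a
 * program check raised by the diagnose just leaves rc at -1 instead of
 * crashing the boot.
 */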
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	unsigned long reg1, reg2, ry;
	union register_pair rx;
	psw_t old;
	int rc;

	rx.even = rx1;
	rx.odd	= rx2;
	ry = 0x10; /* storage configuration */
	rc = -1;   /* fail */
	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	diag	%[rx],%[ry],0x260\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  [ry] "+&d" (ry),
		  "+Q" (S390_lowcore.program_new_psw),
		  "=Q" (old)
		: [rx] "d" (rx.pair),
		  [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw)
		: "cc", "memory");
	return rc == 0 ? ry : -1;
}

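/*
 * Query the storage configuration via diag 0x260. On success __diag260()
 * returns the value left in ry, which is interpreted below as the number of
 * reported extents. The extents carry inclusive end addresses, while
 * add_mem_detect_block() expects an exclusive end, hence the "+ 1".
 */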
static int diag260(void)
{
	int rc, i;

	struct {
		unsigned long start;
		unsigned long end;
	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */

	memset(storage_extents, 0, sizeof(storage_extents));
	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
	if (rc == -1)
		return -1;

	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
		add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
	return 0;
}

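/*
 * Test whether storage exists at addr. TPROT on non-existent storage raises
 * a program check, which is caught via the temporary program-check new PSW
 * and leaves rc at -EFAULT; otherwise the TPROT condition code is returned.
 */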
static int tprot(unsigned long addr)
{
	unsigned long reg1, reg2;
	int rc = -EFAULT;
	psw_t old;

	asm volatile(
		"	mvc	0(16,%[psw_old]),0(%[psw_pgm])\n"
		"	epsw	%[reg1],%[reg2]\n"
		"	st	%[reg1],0(%[psw_pgm])\n"
		"	st	%[reg2],4(%[psw_pgm])\n"
		"	larl	%[reg1],1f\n"
		"	stg	%[reg1],8(%[psw_pgm])\n"
		"	tprot	0(%[addr]),0\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:	mvc	0(16,%[psw_pgm]),0(%[psw_old])\n"
		: [reg1] "=&d" (reg1),
		  [reg2] "=&a" (reg2),
		  [rc] "+&d" (rc),
		  "=Q" (S390_lowcore.program_new_psw.addr),
		  "=Q" (old)
		: [psw_old] "a" (&old),
		  [psw_pgm] "a" (&S390_lowcore.program_new_psw),
		  [addr] "a" (addr)
		: "cc", "memory");
	return rc;
}

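/*
 * Binary search for the highest accessible 1MB block below the
 * MAX_PHYSMEM_BITS limit, assuming that online memory is contiguous
 * from address 0.
 */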
static void search_mem_end(void)
{
	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
	unsigned long offset = 0;
	unsigned long pivot;

	while (range > 1) {
		range >>= 1;
		pivot = offset + range;
		if (!tprot(pivot << 20))
			offset = pivot;
	}

	add_mem_detect_block(0, (offset + 1) << 20);
}

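/*
 * Populate mem_detect with the online memory layout, trying the available
 * information sources in order: SCLP storage info, diag 0x260, the SCLP
 * reported maximum memory size, and finally a binary search with tprot().
 */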
unsigned long detect_memory(void)
{
	unsigned long max_physmem_end;

	sclp_early_get_memsize(&max_physmem_end);

	if (!sclp_early_read_storage_info()) {
		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
		return max_physmem_end;
	}

	if (!diag260()) {
		mem_detect.info_source = MEM_DETECT_DIAG260;
		return max_physmem_end;
	}

	if (max_physmem_end) {
		add_mem_detect_block(0, max_physmem_end);
		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
		return max_physmem_end;
	}

	search_mem_end();
	mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
	return get_mem_detect_end();
}