/* linux/arch/x86/kernel/vmlinux_64.lds.S */
   1/* ld script to make x86-64 Linux kernel
   2 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
   3 */
   4
/*
 * Overall layout: kernel text and read-only data, regular data, the
 * fixed-address vsyscall page group, init-only code/data (grouped so the
 * region between __init_begin and __init_end can be released after boot),
 * BSS, and finally sections discarded outright.  Every output section's
 * load address is given as AT(link address - LOAD_OFFSET): the kernel is
 * linked at the high virtual mapping __START_KERNEL_map but loaded at the
 * corresponding physical address.
 */
   5#define LOAD_OFFSET __START_KERNEL_map
   6
   7#include <asm-generic/vmlinux.lds.h>
   8#include <asm/page.h>
   9
  10#undef i386     /* in case the preprocessor is a 32bit one */
  11
  12OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
  13OUTPUT_ARCH(i386:x86-64)
  14ENTRY(phys_startup_64)
/* Alias: jiffies_64 resolves to the vsyscall-visible jiffies symbol,
 * which is defined further down as VVIRT(.jiffies). */
  15jiffies_64 = jiffies;
/* NOTE(review): _proxy_pda's consumer is not visible in this file —
 * presumably a link-time stand-in for per-CPU PDA references; confirm
 * against asm/pda.h users before relying on this. */
  16_proxy_pda = 1;
/* ELF program headers: separate PT_LOAD segments for text, data, the
 * user-mapped vsyscall area and init_task data, plus a PT_NOTE segment.
 * Sections below are attached to these with the ":name" syntax. */
  17PHDRS {
  18        text PT_LOAD FLAGS(5);  /* R_E */
  19        data PT_LOAD FLAGS(7);  /* RWE */
  20        user PT_LOAD FLAGS(7);  /* RWE */
  21        data.init PT_LOAD FLAGS(7);     /* RWE */
  22        note PT_NOTE FLAGS(4);  /* R__ */
  23}
  24SECTIONS
  25{
  26  . = __START_KERNEL;
  27  phys_startup_64 = startup_64 - LOAD_OFFSET;
  28  _text = .;                    /* Text and read-only data */
  29  .text :  AT(ADDR(.text) - LOAD_OFFSET) {
  30        /* First the code that has to be first for bootstrapping */
  31        *(.text.head)
  32        _stext = .;
  33        /* Then the rest */
  34        TEXT_TEXT
  35        SCHED_TEXT
  36        LOCK_TEXT
  37        KPROBES_TEXT
  38        *(.fixup)
  39        *(.gnu.warning)
  40        _etext = .;             /* End of text section */
  41  } :text = 0x9090              /* fill gaps with 0x90 (x86 NOP) bytes */
  42
  43  . = ALIGN(16);                /* Exception table */
  44  __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
  45        __start___ex_table = .;
  46         *(__ex_table)
  47        __stop___ex_table = .;
  48  }
  49
  50  NOTES :text :note
  51
  52  BUG_TABLE :text
  53
  54  RODATA
  55
  56  . = ALIGN(4);
  57  .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {
  58        __tracedata_start = .;
  59        *(.tracedata)
  60        __tracedata_end = .;
  61  }
  62
  63  . = ALIGN(PAGE_SIZE);         /* Align data segment to page size boundary */
  64                                /* Data */
  65  .data : AT(ADDR(.data) - LOAD_OFFSET) {
  66        DATA_DATA
  67        CONSTRUCTORS
  68        } :data
  69
  70  _edata = .;                   /* End of data section */
  71
  72  . = ALIGN(PAGE_SIZE);
  73  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  74  .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
  75        *(.data.cacheline_aligned)
  76  }
  77  . = ALIGN(CONFIG_X86_INTERNODE_CACHE_BYTES);
  78  .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
  79        *(.data.read_mostly)
  80  }
  81
/*
 * Vsyscall pages: linked at the fixed virtual address VSYSCALL_ADDR
 * (-10MB) but loaded physically right after .data.read_mostly, rounded
 * up to a page (the 4095/&~4095 arithmetic).  VLOAD(x) converts a
 * vsyscall section's link address to its load (physical) address;
 * VVIRT(x) converts it to the normal kernel-mapped virtual address, used
 * for the kernel-side aliases (vsyscall_gtod_data, jiffies, ...) below.
 */
  82#define VSYSCALL_ADDR (-10*1024*1024)
  83#define VSYSCALL_PHYS_ADDR ((LOADADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
  84#define VSYSCALL_VIRT_ADDR ((ADDR(.data.read_mostly) + SIZEOF(.data.read_mostly) + 4095) & ~(4095))
  85
  86#define VLOAD_OFFSET (VSYSCALL_ADDR - VSYSCALL_PHYS_ADDR)
  87#define VLOAD(x) (ADDR(x) - VLOAD_OFFSET)
  88
  89#define VVIRT_OFFSET (VSYSCALL_ADDR - VSYSCALL_VIRT_ADDR)
  90#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
  91
  92  . = VSYSCALL_ADDR;
  93  .vsyscall_0 :  AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
  94  __vsyscall_0 = VSYSCALL_VIRT_ADDR;
  95
  96  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  97  .vsyscall_fn : AT(VLOAD(.vsyscall_fn)) { *(.vsyscall_fn) }
  98  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
  99  .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data))
 100                { *(.vsyscall_gtod_data) }
 101  vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
 102  .vsyscall_clock : AT(VLOAD(.vsyscall_clock))
 103                { *(.vsyscall_clock) }
 104  vsyscall_clock = VVIRT(.vsyscall_clock);
 105
 106
/* The other vsyscall entry points sit at fixed 1024-byte offsets from
 * .vsyscall_0, so they live at well-known addresses within the page. */
 107  .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1))
 108                { *(.vsyscall_1) }
 109  .vsyscall_2 ADDR(.vsyscall_0) + 2048: AT(VLOAD(.vsyscall_2))
 110                { *(.vsyscall_2) }
 111
 112  .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { *(.vgetcpu_mode) }
 113  vgetcpu_mode = VVIRT(.vgetcpu_mode);
 114
 115  . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
 116  .jiffies : AT(VLOAD(.jiffies)) { *(.jiffies) }
 117  jiffies = VVIRT(.jiffies);
 118
 119  .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3))
 120                { *(.vsyscall_3) }
 121
/* Done with the vsyscall range: resume normal layout one page past the
 * vsyscall page's kernel-mapped address. */
 122  . = VSYSCALL_VIRT_ADDR + PAGE_SIZE;
 123
 124#undef VSYSCALL_ADDR
 125#undef VSYSCALL_PHYS_ADDR
 126#undef VSYSCALL_VIRT_ADDR
 127#undef VLOAD_OFFSET
 128#undef VLOAD
 129#undef VVIRT_OFFSET
 130#undef VVIRT
 131
 132  . = ALIGN(THREAD_SIZE);       /* init_task */
 133  .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
 134        *(.data.init_task)
 135  }:data.init
 136
 137  . = ALIGN(PAGE_SIZE);
 138  .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
 139        *(.data.page_aligned)
 140  }
 141
 142  /* might get freed after init */
 143  . = ALIGN(PAGE_SIZE);
 144  __smp_alt_begin = .;
 145  __smp_locks = .;
 146  .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
 147        *(.smp_locks)
 148  }
 149  __smp_locks_end = .;
 150  . = ALIGN(PAGE_SIZE);
 151  __smp_alt_end = .;
 152
/* Init-only code and data: everything between __init_begin and
 * __init_end is page-aligned at both ends so it can be freed after boot.
 * NOTE(review): the code that actually frees this region lives outside
 * this file (presumably free_initmem()); confirm before relying on it. */
 153  . = ALIGN(PAGE_SIZE);         /* Init code and data */
 154  __init_begin = .;
 155  .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
 156        _sinittext = .;
 157        INIT_TEXT
 158        _einittext = .;
 159  }
 160  .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
 161        __initdata_begin = .;
 162        INIT_DATA
 163        __initdata_end = .;
 164   }
 165
 166  . = ALIGN(16);
 167  __setup_start = .;
 168  .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) }
 169  __setup_end = .;
 170  __initcall_start = .;
 171  .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
 172        INITCALLS
 173  }
 174  __initcall_end = .;
 175  __con_initcall_start = .;
 176  .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
 177        *(.con_initcall.init)
 178  }
 179  __con_initcall_end = .;
 180  SECURITY_INIT
 181
 182  . = ALIGN(8);
 183  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
 184  __parainstructions = .;
 185       *(.parainstructions)
 186  __parainstructions_end = .;
 187  }
 188
 189  . = ALIGN(8);
 190  __alt_instructions = .;
 191  .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
 192        *(.altinstructions)
 193  }
 194  __alt_instructions_end = .;
 195  .altinstr_replacement : AT(ADDR(.altinstr_replacement) - LOAD_OFFSET) {
 196        *(.altinstr_replacement)
 197  }
 198  /* .exit.text is discard at runtime, not link time, to deal with references
 199     from .altinstructions and .eh_frame */
 200  .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
 201        EXIT_TEXT
 202  }
 203  .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
 204        EXIT_DATA
 205  }
 206
 207/* vdso blob that is mapped into user space */
 208  vdso_start = . ;
 209  .vdso  : AT(ADDR(.vdso) - LOAD_OFFSET) { *(.vdso) }
 210  . = ALIGN(PAGE_SIZE);
 211  vdso_end = .;
 212
 213#ifdef CONFIG_BLK_DEV_INITRD
 214  . = ALIGN(PAGE_SIZE);
 215  __initramfs_start = .;
 216  .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
 217  __initramfs_end = .;
 218#endif
 219
 220  PERCPU(PAGE_SIZE)
 221
 222  . = ALIGN(PAGE_SIZE);
 223  __init_end = .;
 224
/* NOTE(review): .data.nosave is bounded by page-aligned __nosave_begin /
 * __nosave_end — presumably data excluded from the hibernation image;
 * confirm against the swsusp/hibernation code, which is not visible here. */
 225  . = ALIGN(PAGE_SIZE);
 226  __nosave_begin = .;
 227  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
 228  . = ALIGN(PAGE_SIZE);
 229  __nosave_end = .;
 230
 231  __bss_start = .;              /* BSS */
 232  .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
 233        *(.bss.page_aligned)
 234        *(.bss)
 235        }
 236  __bss_stop = .;
 237
 238  _end = . ;
 239
 240  /* Sections to be discarded */
 241  /DISCARD/ : {
 242        *(.exitcall.exit)
 243        *(.eh_frame)
 244        }
 245
 246  STABS_DEBUG
 247
 248  DWARF_DEBUG
 249}
 250
/* lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995. */