linux/arch/x86/include/asm/calling.h
/*

 x86 function call convention, 64-bit:
 -------------------------------------
  arguments           |  callee-saved      | extra caller-saved | return
 [callee-clobbered]   |                    | [callee-clobbered] |
 ---------------------------------------------------------------------------
 rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]

 ( rsp is obviously invariant across normal function calls, though gcc
   can 'merge' functions when it sees tail-call optimization
   possibilities. rflags is clobbered. Leftover arguments are passed on
   the stack. )

 [*]  In the frame-pointers case rbp is fixed to the stack frame.

 [**] For struct return values wider than 64 bits the return convention
      is a bit more complex: structures up to 128 bits wide are returned
      straight in rax, rdx. For structures larger than that (3 words or
      more) the caller passes a pointer to an on-stack return struct
      [allocated in the caller's stack frame] as the first argument,
      i.e. in rdi, and all other arguments shift up by one.
      Fortunately this case is rare in the kernel.
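
      As a sketch (hypothetical code - actual compiler output varies),
      calling g(a, b) where g returns a 3-word struct then looks
      roughly like:

          leaq  -24(%rbp), %rdi       # hidden pointer to the return slot
          movq  a, %rsi               # 1st visible argument, shifted up
          movq  b, %rdx               # 2nd visible argument
          call  g                     # g returns that pointer in rax too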

For 32-bit we have the following conventions - the kernel is built with
-mregparm=3 and -freg-struct-return:

 x86 function calling convention, 32-bit:
 ----------------------------------------
  arguments         | callee-saved        | extra caller-saved | return
 [callee-clobbered] |                     | [callee-clobbered] |
 -------------------------------------------------------------------------
 eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]

 ( here too esp is obviously invariant across normal function calls. eflags
   is clobbered. Leftover arguments are passed on the stack. )

 [*]  In the frame-pointers case ebp is fixed to the stack frame.

 [**] We build with -freg-struct-return, which on 32-bit means semantics
      similar to 64-bit: edx can be used for a second return value
      (i.e. covering integer and structure sizes up to 64 bits). After
      that it gets more complex and more expensive: 3-word or larger
      struct returns get done in the caller's frame and the pointer to
      the return struct goes into regparm0, i.e. eax - the other
      arguments shift up and the function's register parameters
      degenerate to regparm=2 in essence.
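
      As a sketch (hypothetical code - actual compiler output varies),
      h(a, b, c, d) under -mregparm=3 compiles roughly to:

          movl  a, %eax               # 1st argument
          movl  b, %edx               # 2nd argument
          movl  c, %ecx               # 3rd argument
          pushl d                     # 4th argument spills to the stack
          call  h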

*/

#include "dwarf2.h"

/*
 * 64-bit system call stack frame layout defines and helpers, for
 * assembly code (note that the seemingly unnecessary parentheses
 * are to prevent cpp from inserting spaces in expressions that get
 * passed to macros):
 */

#define R15               (0)
#define R14               (8)
#define R13              (16)
#define R12              (24)
#define RBP              (32)
#define RBX              (40)

/* arguments: interrupts/non-tracing syscalls only save up to here: */
#define R11              (48)
#define R10              (56)
#define R9               (64)
#define R8               (72)
#define RAX              (80)
#define RCX              (88)
#define RDX              (96)
#define RSI             (104)
#define RDI             (112)
#define ORIG_RAX        (120)       /* + error_code */
/* end of arguments */

/* cpu exception frame or undefined in case of fast syscall: */
#define RIP             (128)
#define CS              (136)
#define EFLAGS          (144)
#define RSP             (152)
#define SS              (160)

#define ARGOFFSET       R11
#define SWFRAME         ORIG_RAX
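
/*
 * As a cross-check of the layout above: SAVE_REST below covers the 6
 * callee-saved slots (6*8 = 48 bytes, r15..rbx) and SAVE_ARGS the 9
 * "argument" slots (9*8 = 72 bytes, r11..rdi), together ending exactly
 * at ORIG_RAX (48 + 72 = 120). ARGOFFSET (= R11 = 48) is thus the
 * offset of the SAVE_ARGS area within a full frame.
 */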

/*
 * Allocate 9*8(+\addskip) bytes on the stack and save the "argument"
 * part of the frame in the layout above; the \save_rcx and
 * \save_r891011 flags let a caller skip slots it handles separately:
 */
        .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
        subq  $9*8+\addskip, %rsp
        CFI_ADJUST_CFA_OFFSET   9*8+\addskip
        movq_cfi rdi, 8*8
        movq_cfi rsi, 7*8
        movq_cfi rdx, 6*8

        .if \save_rcx
        movq_cfi rcx, 5*8
        .endif

        movq_cfi rax, 4*8

        .if \save_r891011
        movq_cfi r8,  3*8
        movq_cfi r9,  2*8
        movq_cfi r10, 1*8
        movq_cfi r11, 0*8
        .endif

        .endm

#define ARG_SKIP        (9*8)

/*
 * Undo SAVE_ARGS: restore the saved registers (each group optional)
 * and pop the frame, including any extra \addskip bytes:
 */
        .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
                            rstor_r8910=1, rstor_rdx=1
        .if \rstor_r11
        movq_cfi_restore 0*8, r11
        .endif

        .if \rstor_r8910
        movq_cfi_restore 1*8, r10
        movq_cfi_restore 2*8, r9
        movq_cfi_restore 3*8, r8
        .endif

        .if \rstor_rax
        movq_cfi_restore 4*8, rax
        .endif

        .if \rstor_rcx
        movq_cfi_restore 5*8, rcx
        .endif

        .if \rstor_rdx
        movq_cfi_restore 6*8, rdx
        .endif

        movq_cfi_restore 7*8, rsi
        movq_cfi_restore 8*8, rdi

        .if ARG_SKIP+\addskip > 0
        addq $ARG_SKIP+\addskip, %rsp
        CFI_ADJUST_CFA_OFFSET   -(ARG_SKIP+\addskip)
        .endif
        .endm
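
/*
 * A typical pairing (hypothetical - the real call sites live in the
 * entry code, not in this header):
 *
 *      SAVE_ARGS 8                     # arg slots plus an 8-byte gap
 *      ...
 *      RESTORE_ARGS 1, 8               # restore rax too, pop the gap
 */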

/*
 * Reload the argument registers from a saved frame at \offset(%rsp).
 * Note the rax slot: it is loaded from \offset+72, which with
 * \offset = ARGOFFSET resolves to ORIG_RAX (48 + 72 = 120), i.e. to
 * the original rax value rather than the one saved by SAVE_ARGS:
 */
        .macro LOAD_ARGS offset, skiprax=0
        movq \offset(%rsp),    %r11
        movq \offset+8(%rsp),  %r10
        movq \offset+16(%rsp), %r9
        movq \offset+24(%rsp), %r8
        movq \offset+40(%rsp), %rcx
        movq \offset+48(%rsp), %rdx
        movq \offset+56(%rsp), %rsi
        movq \offset+64(%rsp), %rdi
        .if \skiprax
        .else
        movq \offset+72(%rsp), %rax
        .endif
        .endm
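
/*
 * Hypothetical usage: after a traced syscall the (possibly modified)
 * arguments could be reloaded with
 *
 *      LOAD_ARGS ARGOFFSET, 1          # skip rax, keep its value
 */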

#define REST_SKIP       (6*8)

/* Save the remaining, callee-saved registers, completing the frame: */
        .macro SAVE_REST
        subq $REST_SKIP, %rsp
        CFI_ADJUST_CFA_OFFSET   REST_SKIP
        movq_cfi rbx, 5*8
        movq_cfi rbp, 4*8
        movq_cfi r12, 3*8
        movq_cfi r13, 2*8
        movq_cfi r14, 1*8
        movq_cfi r15, 0*8
        .endm

/* Undo SAVE_REST: */
        .macro RESTORE_REST
        movq_cfi_restore 0*8, r15
        movq_cfi_restore 1*8, r14
        movq_cfi_restore 2*8, r13
        movq_cfi_restore 3*8, r12
        movq_cfi_restore 4*8, rbp
        movq_cfi_restore 5*8, rbx
        addq $REST_SKIP, %rsp
        CFI_ADJUST_CFA_OFFSET   -(REST_SKIP)
        .endm

/* Save/restore the complete register frame: */
        .macro SAVE_ALL
        SAVE_ARGS
        SAVE_REST
        .endm

        .macro RESTORE_ALL addskip=0
        RESTORE_REST
        RESTORE_ARGS 1, \addskip
        .endm
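
/*
 * Hypothetical pairing, analogous to the SAVE_ARGS example above:
 *
 *      SAVE_ALL                        # build the full 15-register frame
 *      ...
 *      RESTORE_ALL                     # tear it down again, rax included
 */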

/*
 * icebp: the undocumented single-byte 0xf1 opcode (aka int01), which
 * raises a debug trap:
 */
        .macro icebp
        .byte 0xf1
        .endm