linux/arch/powerpc/include/asm/ppc_asm.h
   1/*
   2 * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
   3 */
   4#ifndef _ASM_POWERPC_PPC_ASM_H
   5#define _ASM_POWERPC_PPC_ASM_H
   6
   7#include <linux/init.h>
   8#include <linux/stringify.h>
   9#include <asm/asm-compat.h>
  10#include <asm/processor.h>
  11#include <asm/ppc-opcode.h>
  12
   13#ifndef __ASSEMBLY__
   14#error __FILE__ should only be used in assembler files
   15#else
   16
/* SZL: size of a long in bytes (8 on ppc64, 4 on ppc32) - used for
 * pointer-sized loads/stores and stack-slot arithmetic in asm code. */
   17#define SZL                     (BITS_PER_LONG/8)
  18
   19/*
   20 * Stuff for accurate CPU time accounting.
   21 * These macros handle transitions between user and system state
   22 * in exception entry and exit and accumulate time to the
   23 * user_time and system_time fields in the paca.
   24 */
   25
   26#ifndef CONFIG_VIRT_CPU_ACCOUNTING
   27#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
   28#define ACCOUNT_CPU_USER_EXIT(ra, rb)
   29#else
/* On exception entry from user mode: snapshot PURR (or the timebase on
 * CPUs without a PURR) into PACA_STARTPURR and add the interval since the
 * previous snapshot to the accumulated user time.  ra and rb are scratch
 * GPRs; r13 is assumed to hold the paca pointer.  The leading beq skips
 * the whole sequence (to label 2:) when the exception came from kernel
 * mode - the caller must have set CR appropriately beforehand. */
   30#define ACCOUNT_CPU_USER_ENTRY(ra, rb)                                  \
   31        beq     2f;                     /* if from kernel mode */       \
   32BEGIN_FTR_SECTION;                                                      \
   33        mfspr   ra,SPRN_PURR;           /* get processor util. reg */   \
   34END_FTR_SECTION_IFSET(CPU_FTR_PURR);                                    \
   35BEGIN_FTR_SECTION;                                                      \
   36        MFTB(ra);                       /* or get TB if no PURR */      \
   37END_FTR_SECTION_IFCLR(CPU_FTR_PURR);                                    \
   38        ld      rb,PACA_STARTPURR(r13);                                 \
   39        std     ra,PACA_STARTPURR(r13);                                 \
   40        subf    rb,rb,ra;               /* subtract start value */      \
   41        ld      ra,PACA_USER_TIME(r13);                                 \
   42        add     ra,ra,rb;               /* add on to user time */       \
   43        std     ra,PACA_USER_TIME(r13);                                 \
   442:
   45
/* On exception exit to user mode: same snapshot/delta sequence, but the
 * elapsed interval is credited to the accumulated system time. */
   46#define ACCOUNT_CPU_USER_EXIT(ra, rb)                                   \
   47BEGIN_FTR_SECTION;                                                      \
   48        mfspr   ra,SPRN_PURR;           /* get processor util. reg */   \
   49END_FTR_SECTION_IFSET(CPU_FTR_PURR);                                    \
   50BEGIN_FTR_SECTION;                                                      \
   51        MFTB(ra);                       /* or get TB if no PURR */      \
   52END_FTR_SECTION_IFCLR(CPU_FTR_PURR);                                    \
   53        ld      rb,PACA_STARTPURR(r13);                                 \
   54        std     ra,PACA_STARTPURR(r13);                                 \
   55        subf    rb,rb,ra;               /* subtract start value */      \
   56        ld      ra,PACA_SYSTEM_TIME(r13);                               \
   57        add     ra,ra,rb;               /* add on to system time */     \
   58        std     ra,PACA_SYSTEM_TIME(r13);
   59#endif
  60
   61/*
   62 * Macros for storing registers into and loading registers from
   63 * exception frames.
   64 */
   65#ifdef __powerpc64__
   66#define SAVE_GPR(n, base)       std     n,GPR0+8*(n)(base)
   67#define REST_GPR(n, base)       ld      n,GPR0+8*(n)(base)
   68#define SAVE_NVGPRS(base)       SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
   69#define REST_NVGPRS(base)       REST_8GPRS(14, base); REST_10GPRS(22, base)
   70#else
/* 32-bit: GPR slots are 4 bytes, and r13 is part of the non-volatile set
 * saved here (on 64-bit r13 holds the paca pointer - see the accounting
 * macros above - so the non-volatile range starts at r14 instead). */
   71#define SAVE_GPR(n, base)       stw     n,GPR0+4*(n)(base)
   72#define REST_GPR(n, base)       lwz     n,GPR0+4*(n)(base)
   73#define SAVE_NVGPRS(base)       SAVE_GPR(13, base); SAVE_8GPRS(14, base); \
   74                                SAVE_10GPRS(22, base)
   75#define REST_NVGPRS(base)       REST_GPR(13, base); REST_8GPRS(14, base); \
   76                                REST_10GPRS(22, base)
   77#endif
   78
   79/*
   80 * Define what the VSX XX1 form instructions will look like, then add
   81 * the 128 bit load store instructions based on that.
   82 */
/* The 6-bit XS register field is split per the XX1 form: its low 5 bits
 * occupy the usual RT field position (<< 21) and its high bit lands in
 * the instruction's low-order bit.  The loads/stores are emitted as raw
 * .long words so assemblers without VSX support can still build this. */
   83#define VSX_XX1(xs, ra, rb)     (((xs) & 0x1f) << 21 | ((ra) << 16) |  \
   84                                 ((rb) << 11) | (((xs) >> 5)))
   85
   86#define STXVD2X(xs, ra, rb)     .long (0x7c000798 | VSX_XX1((xs), (ra), (rb)))
   87#define LXVD2X(xs, ra, rb)      .long (0x7c000698 | VSX_XX1((xs), (ra), (rb)))
   88
/* Doubling ladders: save/restore runs of consecutive GPRs starting at n. */
   89#define SAVE_2GPRS(n, base)     SAVE_GPR(n, base); SAVE_GPR(n+1, base)
   90#define SAVE_4GPRS(n, base)     SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
   91#define SAVE_8GPRS(n, base)     SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
   92#define SAVE_10GPRS(n, base)    SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
   93#define REST_2GPRS(n, base)     REST_GPR(n, base); REST_GPR(n+1, base)
   94#define REST_4GPRS(n, base)     REST_2GPRS(n, base); REST_2GPRS(n+2, base)
   95#define REST_8GPRS(n, base)     REST_4GPRS(n, base); REST_4GPRS(n+4, base)
   96#define REST_10GPRS(n, base)    REST_8GPRS(n, base); REST_2GPRS(n+8, base)
  97
/* Save/restore FPRs into the thread_struct FP area.  Each slot is
 * 8*TS_FPRWIDTH bytes; TS_FPRWIDTH presumably widens the stride when FP
 * state is kept in VSX-sized slots - confirm against asm-offsets. */
   98#define SAVE_FPR(n, base)       stfd    n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
   99#define SAVE_2FPRS(n, base)     SAVE_FPR(n, base); SAVE_FPR(n+1, base)
  100#define SAVE_4FPRS(n, base)     SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
  101#define SAVE_8FPRS(n, base)     SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
  102#define SAVE_16FPRS(n, base)    SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
  103#define SAVE_32FPRS(n, base)    SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
  104#define REST_FPR(n, base)       lfd     n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
  105#define REST_2FPRS(n, base)     REST_FPR(n, base); REST_FPR(n+1, base)
  106#define REST_4FPRS(n, base)     REST_2FPRS(n, base); REST_2FPRS(n+2, base)
  107#define REST_8FPRS(n, base)     REST_4FPRS(n, base); REST_4FPRS(n+4, base)
  108#define REST_16FPRS(n, base)    REST_8FPRS(n, base); REST_8FPRS(n+8, base)
  109#define REST_32FPRS(n, base)    REST_16FPRS(n, base); REST_16FPRS(n+16, base)
 110
/* Save/restore AltiVec registers (16 bytes each).  stvx/lvx take only an
 * indexed (rb) operand - no displacement form - so the byte offset is
 * first materialised in the scratch GPR b, which these macros clobber. */
  111#define SAVE_VR(n,b,base)       li b,THREAD_VR0+(16*(n));  stvx n,b,base
  112#define SAVE_2VRS(n,b,base)     SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
  113#define SAVE_4VRS(n,b,base)     SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
  114#define SAVE_8VRS(n,b,base)     SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
  115#define SAVE_16VRS(n,b,base)    SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
  116#define SAVE_32VRS(n,b,base)    SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
  117#define REST_VR(n,b,base)       li b,THREAD_VR0+(16*(n)); lvx n,b,base
  118#define REST_2VRS(n,b,base)     REST_VR(n,b,base); REST_VR(n+1,b,base)
  119#define REST_4VRS(n,b,base)     REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
  120#define REST_8VRS(n,b,base)     REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
  121#define REST_16VRS(n,b,base)    REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
  122#define REST_32VRS(n,b,base)    REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
 123
  124/* Save the lower 32 VSRs in the thread VSR region */
/* As with the VR macros above, b is a scratch GPR clobbered with the
 * offset; the 128-bit access itself is the hand-encoded STXVD2X/LXVD2X. */
  125#define SAVE_VSR(n,b,base)      li b,THREAD_VSR0+(16*(n));  STXVD2X(n,b,base)
  126#define SAVE_2VSRS(n,b,base)    SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
  127#define SAVE_4VSRS(n,b,base)    SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
  128#define SAVE_8VSRS(n,b,base)    SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
  129#define SAVE_16VSRS(n,b,base)   SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
  130#define SAVE_32VSRS(n,b,base)   SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
  131#define REST_VSR(n,b,base)      li b,THREAD_VSR0+(16*(n)); LXVD2X(n,b,base)
  132#define REST_2VSRS(n,b,base)    REST_VSR(n,b,base); REST_VSR(n+1,b,base)
  133#define REST_4VSRS(n,b,base)    REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
  134#define REST_8VSRS(n,b,base)    REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
  135#define REST_16VSRS(n,b,base)   REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
  136#define REST_32VSRS(n,b,base)   REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
  137/* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */
/* The upper VSRs go into the AltiVec save area (THREAD_VR0), which is
 * consistent with VSRs 32-63 overlaying the VMX register file. */
  138#define SAVE_VSRU(n,b,base)     li b,THREAD_VR0+(16*(n));  STXVD2X(n+32,b,base)
  139#define SAVE_2VSRSU(n,b,base)   SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
  140#define SAVE_4VSRSU(n,b,base)   SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
  141#define SAVE_8VSRSU(n,b,base)   SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
  142#define SAVE_16VSRSU(n,b,base)  SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
  143#define SAVE_32VSRSU(n,b,base)  SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base)
  144#define REST_VSRU(n,b,base)     li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,b,base)
  145#define REST_2VSRSU(n,b,base)   REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
  146#define REST_4VSRSU(n,b,base)   REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
  147#define REST_8VSRSU(n,b,base)   REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
  148#define REST_16VSRSU(n,b,base)  REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
  149#define REST_32VSRSU(n,b,base)  REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
 150
/* Save/restore the SPE 64-bit GPR extensions.  Only the upper 32 bits of
 * each evr are handled here: evmergehi copies the high word of evr n into
 * scratch GPR s for a 32-bit store, and on restore evmergelo merges the
 * reloaded high word back into n.  s is clobbered; the low halves are
 * presumably covered by the normal 32-bit GPR save/restore. */
  151#define SAVE_EVR(n,s,base)      evmergehi s,s,n; stw s,THREAD_EVR0+4*(n)(base)
  152#define SAVE_2EVRS(n,s,base)    SAVE_EVR(n,s,base); SAVE_EVR(n+1,s,base)
  153#define SAVE_4EVRS(n,s,base)    SAVE_2EVRS(n,s,base); SAVE_2EVRS(n+2,s,base)
  154#define SAVE_8EVRS(n,s,base)    SAVE_4EVRS(n,s,base); SAVE_4EVRS(n+4,s,base)
  155#define SAVE_16EVRS(n,s,base)   SAVE_8EVRS(n,s,base); SAVE_8EVRS(n+8,s,base)
  156#define SAVE_32EVRS(n,s,base)   SAVE_16EVRS(n,s,base); SAVE_16EVRS(n+16,s,base)
  157#define REST_EVR(n,s,base)      lwz s,THREAD_EVR0+4*(n)(base); evmergelo n,s,n
  158#define REST_2EVRS(n,s,base)    REST_EVR(n,s,base); REST_EVR(n+1,s,base)
  159#define REST_4EVRS(n,s,base)    REST_2EVRS(n,s,base); REST_2EVRS(n+2,s,base)
  160#define REST_8EVRS(n,s,base)    REST_4EVRS(n,s,base); REST_4EVRS(n+4,s,base)
  161#define REST_16EVRS(n,s,base)   REST_8EVRS(n,s,base); REST_8EVRS(n+8,s,base)
  162#define REST_32EVRS(n,s,base)   REST_16EVRS(n,s,base); REST_16EVRS(n+16,s,base)
 163
  164/* Macros to adjust thread priority for hardware multithreading */
/* These "or Rx,Rx,Rx" forms are architectural no-ops whose register
 * number encodes an SMT thread-priority hint to the core. */
  165#define HMT_VERY_LOW    or      31,31,31        # very low priority
  166#define HMT_LOW         or      1,1,1
  167#define HMT_MEDIUM_LOW  or      6,6,6           # medium low priority
  168#define HMT_MEDIUM      or      2,2,2
  169#define HMT_MEDIUM_HIGH or      5,5,5           # medium high priority
  170#define HMT_HIGH        or      3,3,3
 171
  172#ifdef __KERNEL__
  173#ifdef CONFIG_PPC64
  174
/* Token-pasting helpers: GLUE expands its arguments before pasting. */
  175#define XGLUE(a,b) a##b
  176#define GLUE(a,b) XGLUE(a,b)
  177
/* 64-bit function-entry macros (ELFv1-style ABI).  Each function gets a
 * three-doubleword function descriptor in .opd - code entry address, TOC
 * base, environment word - published under "name", while the code itself
 * is labelled GLUE(.,name), i.e. ".name". */
  178#define _GLOBAL(name) \
  179        .section ".text"; \
  180        .align 2 ; \
  181        .globl name; \
  182        .globl GLUE(.,name); \
  183        .section ".opd","aw"; \
  184name: \
  185        .quad GLUE(.,name); \
  186        .quad .TOC.@tocbase; \
  187        .quad 0; \
  188        .previous; \
  189        .type GLUE(.,name),@function; \
  190GLUE(.,name):
  191
/* Like _GLOBAL, but the code is placed via __REF (init/ref text) instead
 * of ".text". */
  192#define _INIT_GLOBAL(name) \
  193        __REF; \
  194        .align 2 ; \
  195        .globl name; \
  196        .globl GLUE(.,name); \
  197        .section ".opd","aw"; \
  198name: \
  199        .quad GLUE(.,name); \
  200        .quad .TOC.@tocbase; \
  201        .quad 0; \
  202        .previous; \
  203        .type GLUE(.,name),@function; \
  204GLUE(.,name):
  205
/* Like _GLOBAL, but the code goes in ".kprobes.text" (section checked by
 * the kprobes blacklist machinery). */
  206#define _KPROBE(name) \
  207        .section ".kprobes.text","a"; \
  208        .align 2 ; \
  209        .globl name; \
  210        .globl GLUE(.,name); \
  211        .section ".opd","aw"; \
  212name: \
  213        .quad GLUE(.,name); \
  214        .quad .TOC.@tocbase; \
  215        .quad 0; \
  216        .previous; \
  217        .type GLUE(.,name),@function; \
  218GLUE(.,name):
  219
/* File-local variant: same descriptor layout but no .globl, so neither
 * the descriptor nor the entry point is exported. */
  220#define _STATIC(name) \
  221        .section ".text"; \
  222        .align 2 ; \
  223        .section ".opd","aw"; \
  224name: \
  225        .quad GLUE(.,name); \
  226        .quad .TOC.@tocbase; \
  227        .quad 0; \
  228        .previous; \
  229        .type GLUE(.,name),@function; \
  230GLUE(.,name):
  231
/* File-local variant placed via __REF, as _INIT_GLOBAL is to _GLOBAL. */
  232#define _INIT_STATIC(name) \
  233        __REF; \
  234        .align 2 ; \
  235        .section ".opd","aw"; \
  236name: \
  237        .quad GLUE(.,name); \
  238        .quad .TOC.@tocbase; \
  239        .quad 0; \
  240        .previous; \
  241        .type GLUE(.,name),@function; \
  242GLUE(.,name):
  243
  244#else /* 32-bit */
  245
/* 32-bit: plain global label, no descriptor needed. */
  246#define _ENTRY(n)       \
  247        .globl n;       \
  248n:
  249
/* 32-bit _GLOBAL additionally emits a stabs N_FUN debug entry for the
 * function (see the stab codes at the bottom of this file). */
  250#define _GLOBAL(n)      \
  251        .text;          \
  252        .stabs __stringify(n:F-1),N_FUN,0,0,n;\
  253        .globl n;       \
  254n:
  255
  256#define _KPROBE(n)      \
  257        .section ".kprobes.text","a";   \
  258        .globl  n;      \
  259n:
  260
  261#endif
 262
  263/*
  264 * LOAD_REG_IMMEDIATE(rn, expr)
  265 *   Loads the value of the constant expression 'expr' into register 'rn'
  266 *   using immediate instructions only.  Use this when it's important not
  267 *   to reference other data (i.e. on ppc64 when the TOC pointer is not
  268 *   valid) and when 'expr' is a constant or absolute address.
  269 *
  270 * LOAD_REG_ADDR(rn, name)
  271 *   Loads the address of label 'name' into register 'rn'.  Use this when
  272 *   you don't particularly need immediate instructions only, but you need
  273 *   the whole address in one register (e.g. it's a structure address and
  274 *   you want to access various offsets within it).  On ppc32 this is
  275 *   identical to LOAD_REG_IMMEDIATE.
  276 *
  277 * LOAD_REG_ADDRBASE(rn, name)
  278 * ADDROFF(name)
  279 *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
  280 *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
  281 *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
  282 *   in size, so is suitable for use directly as an offset in load and store
  283 *   instructions.  Use this when loading/storing a single word or less as:
  284 *      LOAD_REG_ADDRBASE(rX, name)
  285 *      ld      rY,ADDROFF(name)(rX)
  286 */
  287#ifdef __powerpc64__
/* Build the full 64-bit constant 16 bits at a time: top two halfwords,
 * shift left 32 (rldicr), then OR in the bottom two halfwords. */
  288#define LOAD_REG_IMMEDIATE(reg,expr)            \
  289        lis     (reg),(expr)@highest;           \
  290        ori     (reg),(reg),(expr)@higher;      \
  291        rldicr  (reg),(reg),32,31;              \
  292        oris    (reg),(reg),(expr)@h;           \
  293        ori     (reg),(reg),(expr)@l;
  294
/* 64-bit address load goes through the GOT, so r2 (TOC) must be valid. */
  295#define LOAD_REG_ADDR(reg,name)                 \
  296        ld      (reg),name@got(r2)
  297
  298#define LOAD_REG_ADDRBASE(reg,name)     LOAD_REG_ADDR(reg,name)
  299#define ADDROFF(name)                   0
  300
  301/* offsets for stack frame layout */
/* LRSAVE: offset of the link-register save slot in a stack frame. */
  302#define LRSAVE  16
  303
  304#else /* 32-bit */
  305
  306#define LOAD_REG_IMMEDIATE(reg,expr)            \
  307        lis     (reg),(expr)@ha;                \
  308        addi    (reg),(reg),(expr)@l;
  309
  310#define LOAD_REG_ADDR(reg,name)         LOAD_REG_IMMEDIATE(reg, name)
  311
  312#define LOAD_REG_ADDRBASE(reg, name)    lis     (reg),name@ha
  313#define ADDROFF(name)                   name@l
  314
  315/* offsets for stack frame layout */
  316#define LRSAVE  4
  317
  318#endif
 319
  320/* various errata or part fixups */
/* PPC601 workaround (CONFIG_PPC601_SYNC_FIX): patch in sync/isync
 * sequences via feature sections on 601 parts; with the config option
 * off, all three macros expand to nothing. */
  321#ifdef CONFIG_PPC601_SYNC_FIX
  322#define SYNC                            \
  323BEGIN_FTR_SECTION                       \
  324        sync;                           \
  325        isync;                          \
  326END_FTR_SECTION_IFSET(CPU_FTR_601)
  327#define SYNC_601                        \
  328BEGIN_FTR_SECTION                       \
  329        sync;                           \
  330END_FTR_SECTION_IFSET(CPU_FTR_601)
  331#define ISYNC_601                       \
  332BEGIN_FTR_SECTION                       \
  333        isync;                          \
  334END_FTR_SECTION_IFSET(CPU_FTR_601)
  335#else
  336#define SYNC
  337#define SYNC_601
  338#define ISYNC_601
  339#endif
 340
/* Read the timebase.  On Cell (CPU_FTR_CELL_TB_BUG) the nested feature
 * section adds a retry loop: mftb can spuriously return 0 there, so loop
 * back to label 90 until a non-zero value is read. */
  341#ifdef CONFIG_PPC_CELL
  342#define MFTB(dest)                      \
  34390:     mftb  dest;                     \
  344BEGIN_FTR_SECTION_NESTED(96);           \
  345        cmpwi dest,0;                   \
  346        beq-  90b;                      \
  347END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
  348#else
  349#define MFTB(dest)                      mftb dest
  350#endif
 351
/* TLBSYNC: no-op on UP; on SMP, tlbsync + sync to order TLB invalidates
 * against other CPUs, skipped via feature section on 601. */
  352#ifndef CONFIG_SMP
  353#define TLBSYNC
  354#else /* CONFIG_SMP */
  355/* tlbsync is not implemented on 601 */
  356#define TLBSYNC                         \
  357BEGIN_FTR_SECTION                       \
  358        tlbsync;                        \
  359        sync;                           \
  360END_FTR_SECTION_IFCLR(CPU_FTR_601)
  361#endif
 362
 363        
  364/*
  365 * This instruction is not implemented on the PPC 603 or 601; however, on
  366 * the 403GCX and 405GP tlbia IS defined and tlbie is not.
  367 * All of these instructions exist in the 8xx, they have magical powers,
  368 * and they must be used.
  369 */
  370
/* Synthesised tlbia: issue tlbie for 1024 consecutive 4k pages starting
 * at KERNELBASE.  NOTE: clobbers r4 and the count register. */
  371#if !defined(CONFIG_4xx) && !defined(CONFIG_8xx)
  372#define tlbia                                   \
  373        li      r4,1024;                        \
  374        mtctr   r4;                             \
  375        lis     r4,KERNELBASE@h;                \
  3760:      tlbie   r4;                             \
  377        addi    r4,r4,0x1000;                   \
  378        bdnz    0b
  379#endif
  380
  381
/* PPC440EP erratum 42 workaround: insert an isync where required. */
  382#ifdef CONFIG_IBM440EP_ERR42
  383#define PPC440EP_ERR42 isync
  384#else
  385#define PPC440EP_ERR42
  386#endif
 387
 388
  389#if defined(CONFIG_BOOKE)
  390#define toreal(rd)
  391#define fromreal(rd)
  392
  393/*
  394 * We use addis to ensure compatibility with the "classic" ppc versions of
  395 * these macros, which use rs = 0 to get the tophys offset in rd, rather than
  396 * converting the address in r0, and so this version has to do that too
  397 * (i.e. set register rd to 0 when rs == 0).
  398 */
/* Book E: virtual == real here, so the conversion is an addis of 0. */
  399#define tophys(rd,rs)                           \
  400        addis   rd,rs,0
  401
  402#define tovirt(rd,rs)                           \
  403        addis   rd,rs,0
  404
  405#elif defined(CONFIG_PPC64)
  406#define toreal(rd)              /* we can access c000... in real mode */
  407#define fromreal(rd)
  408
/* 64-bit: physical = virtual with the top two address bits cleared. */
  409#define tophys(rd,rs)                           \
  410        clrldi  rd,rs,2
  411
/* 64-bit: rebuild the virtual address by rotating the top 16 bits down,
 * ORing in KERNELBASE's top halfword, and rotating back. */
  412#define tovirt(rd,rs)                           \
  413        rotldi  rd,rs,16;                       \
  414        ori     rd,rd,((KERNELBASE>>48)&0xFFFF);\
  415        rotldi  rd,rd,48
  416#else
  417/*
  418 * On APUS (Amiga PowerPC cpu upgrade board), we don't know the
  419 * physical base address of RAM at compile time.
  420 */
  421#define toreal(rd)      tophys(rd,rd)
  422#define fromreal(rd)    tovirt(rd,rd)
  423
/* Classic 32-bit: each conversion site is recorded in a fixup section
 * (.vtop_fixup / .ptov_fixup) - presumably so the addis offset can be
 * patched at boot once the real RAM base is known (see comment above). */
  424#define tophys(rd,rs)                           \
  4250:      addis   rd,rs,-PAGE_OFFSET@h;           \
  426        .section ".vtop_fixup","aw";            \
  427        .align  1;                              \
  428        .long   0b;                             \
  429        .previous
  430
  431#define tovirt(rd,rs)                           \
  4320:      addis   rd,rs,PAGE_OFFSET@h;            \
  433        .section ".ptov_fixup","aw";            \
  434        .align  1;                              \
  435        .long   0b;                             \
  436        .previous
  437#endif
  438
/* Exception-return and MSR-write mnemonics: 64-bit uses rfid/mtmsrd;
 * 32-bit uses rfi/mtmsr, and FIX_SRR1/CLR_TOP32 become no-ops there. */
  439#ifdef CONFIG_PPC64
  440#define RFI             rfid
  441#define MTMSRD(r)       mtmsrd  r
  442
  443#else
  444#define FIX_SRR1(ra, rb)
  445#ifndef CONFIG_40x
  446#define RFI             rfi
  447#else
  448#define RFI             rfi; b .        /* Prevent prefetch past rfi */
  449#endif
  450#define MTMSRD(r)       mtmsr   r
  451#define CLR_TOP32(r)
  452#endif
 453
 454#endif /* __KERNEL__ */
 455
  456/* The boring bits... */
/* Symbolic names for register numbers: the assembler takes bare numbers
 * in operands, so these #defines let kernel asm write r3, cr0, fr1, etc. */
  457
  458/* Condition Register Bit Fields */
  459
  460#define cr0     0
  461#define cr1     1
  462#define cr2     2
  463#define cr3     3
  464#define cr4     4
  465#define cr5     5
  466#define cr6     6
  467#define cr7     7
  468
  469
  470/* General Purpose Registers (GPRs) */
  471
  472#define r0      0
  473#define r1      1
  474#define r2      2
  475#define r3      3
  476#define r4      4
  477#define r5      5
  478#define r6      6
  479#define r7      7
  480#define r8      8
  481#define r9      9
  482#define r10     10
  483#define r11     11
  484#define r12     12
  485#define r13     13
  486#define r14     14
  487#define r15     15
  488#define r16     16
  489#define r17     17
  490#define r18     18
  491#define r19     19
  492#define r20     20
  493#define r21     21
  494#define r22     22
  495#define r23     23
  496#define r24     24
  497#define r25     25
  498#define r26     26
  499#define r27     27
  500#define r28     28
  501#define r29     29
  502#define r30     30
  503#define r31     31
  504
  505
  506/* Floating Point Registers (FPRs) */
  507
  508#define fr0     0
  509#define fr1     1
  510#define fr2     2
  511#define fr3     3
  512#define fr4     4
  513#define fr5     5
  514#define fr6     6
  515#define fr7     7
  516#define fr8     8
  517#define fr9     9
  518#define fr10    10
  519#define fr11    11
  520#define fr12    12
  521#define fr13    13
  522#define fr14    14
  523#define fr15    15
  524#define fr16    16
  525#define fr17    17
  526#define fr18    18
  527#define fr19    19
  528#define fr20    20
  529#define fr21    21
  530#define fr22    22
  531#define fr23    23
  532#define fr24    24
  533#define fr25    25
  534#define fr26    26
  535#define fr27    27
  536#define fr28    28
  537#define fr29    29
  538#define fr30    30
  539#define fr31    31
  540
  541/* AltiVec Registers (VPRs) */
  542
  543#define vr0     0
  544#define vr1     1
  545#define vr2     2
  546#define vr3     3
  547#define vr4     4
  548#define vr5     5
  549#define vr6     6
  550#define vr7     7
  551#define vr8     8
  552#define vr9     9
  553#define vr10    10
  554#define vr11    11
  555#define vr12    12
  556#define vr13    13
  557#define vr14    14
  558#define vr15    15
  559#define vr16    16
  560#define vr17    17
  561#define vr18    18
  562#define vr19    19
  563#define vr20    20
  564#define vr21    21
  565#define vr22    22
  566#define vr23    23
  567#define vr24    24
  568#define vr25    25
  569#define vr26    26
  570#define vr27    27
  571#define vr28    28
  572#define vr29    29
  573#define vr30    30
  574#define vr31    31
  575
  576/* VSX Registers (VSRs) */
  577
  578#define vsr0    0
  579#define vsr1    1
  580#define vsr2    2
  581#define vsr3    3
  582#define vsr4    4
  583#define vsr5    5
  584#define vsr6    6
  585#define vsr7    7
  586#define vsr8    8
  587#define vsr9    9
  588#define vsr10   10
  589#define vsr11   11
  590#define vsr12   12
  591#define vsr13   13
  592#define vsr14   14
  593#define vsr15   15
  594#define vsr16   16
  595#define vsr17   17
  596#define vsr18   18
  597#define vsr19   19
  598#define vsr20   20
  599#define vsr21   21
  600#define vsr22   22
  601#define vsr23   23
  602#define vsr24   24
  603#define vsr25   25
  604#define vsr26   26
  605#define vsr27   27
  606#define vsr28   28
  607#define vsr29   29
  608#define vsr30   30
  609#define vsr31   31
  610#define vsr32   32
  611#define vsr33   33
  612#define vsr34   34
  613#define vsr35   35
  614#define vsr36   36
  615#define vsr37   37
  616#define vsr38   38
  617#define vsr39   39
  618#define vsr40   40
  619#define vsr41   41
  620#define vsr42   42
  621#define vsr43   43
  622#define vsr44   44
  623#define vsr45   45
  624#define vsr46   46
  625#define vsr47   47
  626#define vsr48   48
  627#define vsr49   49
  628#define vsr50   50
  629#define vsr51   51
  630#define vsr52   52
  631#define vsr53   53
  632#define vsr54   54
  633#define vsr55   55
  634#define vsr56   56
  635#define vsr57   57
  636#define vsr58   58
  637#define vsr59   59
  638#define vsr60   60
  639#define vsr61   61
  640#define vsr62   62
  641#define vsr63   63
  642
  643/* SPE Registers (EVPRs) */
  644
  645#define evr0    0
  646#define evr1    1
  647#define evr2    2
  648#define evr3    3
  649#define evr4    4
  650#define evr5    5
  651#define evr6    6
  652#define evr7    7
  653#define evr8    8
  654#define evr9    9
  655#define evr10   10
  656#define evr11   11
  657#define evr12   12
  658#define evr13   13
  659#define evr14   14
  660#define evr15   15
  661#define evr16   16
  662#define evr17   17
  663#define evr18   18
  664#define evr19   19
  665#define evr20   20
  666#define evr21   21
  667#define evr22   22
  668#define evr23   23
  669#define evr24   24
  670#define evr25   25
  671#define evr26   26
  672#define evr27   27
  673#define evr28   28
  674#define evr29   29
  675#define evr30   30
  676#define evr31   31
  677
  678/* some stab codes */
/* Stab type codes used by the 32-bit _GLOBAL .stabs directive above. */
  679#define N_FUN   36
  680#define N_RSYM  64
  681#define N_SLINE 68
  682#define N_SO    100
 683
 684#endif /*  __ASSEMBLY__ */
 685
 686#endif /* _ASM_POWERPC_PPC_ASM_H */
 687