linux/arch/x86/events/intel/core.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Per core/cpu state
   4 *
   5 * Used to coordinate shared registers between HT threads or
   6 * among events on a single PMU.
   7 */
   8
   9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  10
  11#include <linux/stddef.h>
  12#include <linux/types.h>
  13#include <linux/init.h>
  14#include <linux/slab.h>
  15#include <linux/export.h>
  16#include <linux/nmi.h>
  17
  18#include <asm/cpufeature.h>
  19#include <asm/hardirq.h>
  20#include <asm/intel-family.h>
  21#include <asm/intel_pt.h>
  22#include <asm/apic.h>
  23#include <asm/cpu_device_id.h>
  24
  25#include "../perf_event.h"
  26
  27/*
  28 * Intel PerfMon, used on Core and later.
  29 */
  30static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
  31{
  32        [PERF_COUNT_HW_CPU_CYCLES]              = 0x003c,
  33        [PERF_COUNT_HW_INSTRUCTIONS]            = 0x00c0,
  34        [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x4f2e,
  35        [PERF_COUNT_HW_CACHE_MISSES]            = 0x412e,
  36        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x00c4,
  37        [PERF_COUNT_HW_BRANCH_MISSES]           = 0x00c5,
  38        [PERF_COUNT_HW_BUS_CYCLES]              = 0x013c,
  39        [PERF_COUNT_HW_REF_CPU_CYCLES]          = 0x0300, /* pseudo-encoding */
  40};
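/*
 * For illustration (not part of the table above): these values use the usual
 * PERFEVTSEL layout, with the event select in bits 0-7 and the unit mask in
 * bits 8-15.  E.g. 0x412e is event 0x2E, umask 0x41 (the architectural
 * "LLC Misses" event) and 0x4f2e is event 0x2E, umask 0x4F ("LLC References").
 * 0x0300 does not decode this way; it is a pseudo-encoding that the
 * constraint tables below steer onto fixed counter 2 (CPU_CLK_UNHALTED.REF).
 */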
  41
  42static struct event_constraint intel_core_event_constraints[] __read_mostly =
  43{
  44        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  45        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  46        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  47        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  48        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  49        INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
  50        EVENT_CONSTRAINT_END
  51};
  52
  53static struct event_constraint intel_core2_event_constraints[] __read_mostly =
  54{
  55        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  56        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  57        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  58        INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
  59        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
  60        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
  61        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
  62        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
  63        INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
  64        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
  65        INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
  66        INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
  67        INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
  68        EVENT_CONSTRAINT_END
  69};
  70
  71static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
  72{
  73        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  74        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  75        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
  76        INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
  77        INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
  78        INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
  79        INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
  80        INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
  81        INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
  82        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
  83        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
  84        EVENT_CONSTRAINT_END
  85};
  86
  87static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
  88{
  89        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
  90        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
  91        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
  92        EVENT_EXTRA_END
  93};
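/*
 * Illustrative usage, assuming the standard perf tool syntax: 0x01b7 is
 * OFFCORE_RESPONSE_0 (event 0xB7, umask 0x01).  The response-type selection
 * is not part of the event encoding but is written to MSR_OFFCORE_RSP_0;
 * userspace supplies it through attr.config1 (the "offcore_rsp" format), and
 * the 0xffff above is the set of bits this model accepts.  Roughly:
 *
 *   perf stat -e cpu/event=0xb7,umask=0x01,offcore_rsp=0xf701/
 *
 * where 0xf701 would select demand data reads with any L3 hit or miss
 * response (see the NHM_* bit definitions further down).
 */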
  94
  95static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
  96{
  97        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
  98        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
  99        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 100        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
 101        INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
 102        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
 103        INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
 104        EVENT_CONSTRAINT_END
 105};
 106
 107static struct event_constraint intel_snb_event_constraints[] __read_mostly =
 108{
 109        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 110        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 111        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 112        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 113        INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
 114        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 115        INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 116        INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
 117        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 118        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 119        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
 120        INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 121
 122        /*
 123         * When HT is off, these events can only run on the bottom 4 counters.
 124         * When HT is on, they are impacted by the HT bug and require EXCL access.
 125         */
 126        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 127        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 128        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 129        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 130
 131        EVENT_CONSTRAINT_END
 132};
 133
 134static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
 135{
 136        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 137        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 138        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 139        INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
 140        INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
 141        INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
 142        INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
 143        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
 144        INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
 145        INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
 146        INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 147        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 148        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 149
 150        /*
 151         * When HT is off, these events can only run on the bottom 4 counters.
 152         * When HT is on, they are impacted by the HT bug and require EXCL access.
 153         */
 154        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 155        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 156        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 157        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 158
 159        EVENT_CONSTRAINT_END
 160};
 161
 162static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
 163{
 164        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 165        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
 166        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
 167        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
 168        EVENT_EXTRA_END
 169};
 170
 171static struct event_constraint intel_v1_event_constraints[] __read_mostly =
 172{
 173        EVENT_CONSTRAINT_END
 174};
 175
 176static struct event_constraint intel_gen_event_constraints[] __read_mostly =
 177{
 178        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 179        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 180        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 181        EVENT_CONSTRAINT_END
 182};
 183
 184static struct event_constraint intel_slm_event_constraints[] __read_mostly =
 185{
 186        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 187        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 188        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
 189        EVENT_CONSTRAINT_END
 190};
 191
 192static struct event_constraint intel_skl_event_constraints[] = {
 193        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 194        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 195        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 196        INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),    /* INST_RETIRED.PREC_DIST */
 197
 198        /*
 199         * when HT is off, these can only run on the bottom 4 counters
 200         */
 201        INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
 202        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
 203        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
 204        INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
 205        INTEL_EVENT_CONSTRAINT(0xc6, 0xf),      /* FRONTEND_RETIRED.* */
 206
 207        EVENT_CONSTRAINT_END
 208};
 209
 210static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
 211        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
 212        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
 213        EVENT_EXTRA_END
 214};
 215
 216static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
 217        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 218        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 219        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 220        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 221        EVENT_EXTRA_END
 222};
 223
 224static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
 225        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
 226        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 227        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 228        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 229        EVENT_EXTRA_END
 230};
 231
 232static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
 233        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 234        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
 235        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 236        /*
 237         * Note that the low 8 bits of the eventsel code do not form a contiguous
 238         * field; some bit positions #GP when set, so they are masked out.
 239         */
 240        INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 241        EVENT_EXTRA_END
 242};
 243
 244static struct event_constraint intel_icl_event_constraints[] = {
 245        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 246        FIXED_EVENT_CONSTRAINT(0x01c0, 0),      /* INST_RETIRED.PREC_DIST */
 247        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 248        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 249        FIXED_EVENT_CONSTRAINT(0x0400, 3),      /* SLOTS */
 250        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
 251        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
 252        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
 253        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
 254        INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
 255        INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
 256        INTEL_EVENT_CONSTRAINT(0x32, 0xf),      /* SW_PREFETCH_ACCESS.* */
 257        INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
 258        INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
 259        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_TOTAL */
 260        INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),  /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
 261        INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff),  /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
 262        INTEL_EVENT_CONSTRAINT(0xa3, 0xf),      /* CYCLE_ACTIVITY.* */
 263        INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
 264        INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
 265        INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
 266        INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
 267        EVENT_CONSTRAINT_END
 268};
 269
 270static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
 271        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
 272        INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
 273        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 274        INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 275        EVENT_EXTRA_END
 276};
 277
 278static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
 279        INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
 280        INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
 281        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 282        INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
 283        INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
 284        INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
 285        EVENT_EXTRA_END
 286};
 287
 288static struct event_constraint intel_spr_event_constraints[] = {
 289        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 290        FIXED_EVENT_CONSTRAINT(0x01c0, 0),      /* INST_RETIRED.PREC_DIST */
 291        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 292        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 293        FIXED_EVENT_CONSTRAINT(0x0400, 3),      /* SLOTS */
 294        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
 295        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
 296        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
 297        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
 298        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
 299        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
 300        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
 301        METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),
 302
 303        INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
 304        INTEL_EVENT_CONSTRAINT(0x3c, 0xff),
 305        /*
 306         * Generally, event codes < 0x90 are restricted to counters 0-3.
 307         * Events 0x2E and 0x3C are exceptions and have no restriction.
 308         */
 309        INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),
 310
 311        INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
 312        INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
 313        INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
 314        INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
 315        INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
 316        INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
 317        INTEL_EVENT_CONSTRAINT(0xce, 0x1),
 318        INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
 319        /*
 320         * Generally, event codes >= 0x90 are likely to have no restrictions.
 321         * The exceptions are defined above.
 322         */
 323        INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),
 324
 325        EVENT_CONSTRAINT_END
 326};
 327
 328
 329EVENT_ATTR_STR(mem-loads,       mem_ld_nhm,     "event=0x0b,umask=0x10,ldlat=3");
 330EVENT_ATTR_STR(mem-loads,       mem_ld_snb,     "event=0xcd,umask=0x1,ldlat=3");
 331EVENT_ATTR_STR(mem-stores,      mem_st_snb,     "event=0xcd,umask=0x2");
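/*
 * These aliases are exported under /sys/bus/event_source/devices/cpu/events/
 * and are what tools such as "perf mem record" build on.  Illustrative
 * detail: the ldlat=3 term sets the minimum load latency, in core cycles,
 * below which the PEBS load-latency facility does not report a sample.
 */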
 332
 333static struct attribute *nhm_mem_events_attrs[] = {
 334        EVENT_PTR(mem_ld_nhm),
 335        NULL,
 336};
 337
 338/*
 339 * topdown events for Intel Core CPUs.
 340 *
 341 * The events are all counted in slots, where a slot is an issue
 342 * opportunity in the 4-wide pipeline. Some events are already reported
 343 * in slots; for cycle events we multiply by the pipeline width (4).
 344 *
 345 * With Hyper Threading on, topdown metrics are either summed or averaged
 346 * between the threads of a core: (count_t0 + count_t1).
 347 *
 348 * For the average case the metric is always scaled to the pipeline width,
 349 * so we use a factor of 2: (count_t0 + count_t1) / 2 * 4.
 350 */
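/*
 * Worked example of the scaling above (illustrative only): with HT off,
 * topdown-total-slots = 4 * cpu_clk_unhalted.thread.  With HT on, perf sums
 * the .thread_any counts of both siblings, so if t0 counts 600 cycles and t1
 * counts 400, total-slots = (600 + 400) * 2 = 2000, which equals
 * (600 + 400) / 2 * 4.  The level-1 ratios are then formed from these
 * attributes, e.g. frontend bound ~= topdown-fetch-bubbles /
 * topdown-total-slots and retiring ~= topdown-slots-retired /
 * topdown-total-slots.
 */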
 351
 352EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
 353        "event=0x3c,umask=0x0",                 /* cpu_clk_unhalted.thread */
 354        "event=0x3c,umask=0x0,any=1");          /* cpu_clk_unhalted.thread_any */
 355EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
 356EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
 357        "event=0xe,umask=0x1");                 /* uops_issued.any */
 358EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
 359        "event=0xc2,umask=0x2");                /* uops_retired.retire_slots */
 360EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
 361        "event=0x9c,umask=0x1");                /* idq_uops_not_delivered_core */
 362EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
 363        "event=0xd,umask=0x3,cmask=1",          /* int_misc.recovery_cycles */
 364        "event=0xd,umask=0x3,cmask=1,any=1");   /* int_misc.recovery_cycles_any */
 365EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
 366        "4", "2");
 367
 368EVENT_ATTR_STR(slots,                   slots,                  "event=0x00,umask=0x4");
 369EVENT_ATTR_STR(topdown-retiring,        td_retiring,            "event=0x00,umask=0x80");
 370EVENT_ATTR_STR(topdown-bad-spec,        td_bad_spec,            "event=0x00,umask=0x81");
 371EVENT_ATTR_STR(topdown-fe-bound,        td_fe_bound,            "event=0x00,umask=0x82");
 372EVENT_ATTR_STR(topdown-be-bound,        td_be_bound,            "event=0x00,umask=0x83");
 373EVENT_ATTR_STR(topdown-heavy-ops,       td_heavy_ops,           "event=0x00,umask=0x84");
 374EVENT_ATTR_STR(topdown-br-mispredict,   td_br_mispredict,       "event=0x00,umask=0x85");
 375EVENT_ATTR_STR(topdown-fetch-lat,       td_fetch_lat,           "event=0x00,umask=0x86");
 376EVENT_ATTR_STR(topdown-mem-bound,       td_mem_bound,           "event=0x00,umask=0x87");
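/*
 * These event=0x00 encodings are pseudo-events: umask 0x4 selects the fixed
 * TOPDOWN.SLOTS counter and umasks 0x80-0x87 select the eight PERF_METRICS
 * sub-fields, matching the METRIC_EVENT_CONSTRAINT() indices used above.
 * Usage sketch (illustrative): the metric events must be grouped with slots
 * as the leader, e.g.
 *
 *   perf stat -e '{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}'
 *
 * The last four attributes (heavy-ops, br-mispredict, fetch-lat, mem-bound)
 * are only exposed on parts that implement the second half of PERF_METRICS,
 * e.g. Sapphire Rapids.
 */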
 377
 378static struct attribute *snb_events_attrs[] = {
 379        EVENT_PTR(td_slots_issued),
 380        EVENT_PTR(td_slots_retired),
 381        EVENT_PTR(td_fetch_bubbles),
 382        EVENT_PTR(td_total_slots),
 383        EVENT_PTR(td_total_slots_scale),
 384        EVENT_PTR(td_recovery_bubbles),
 385        EVENT_PTR(td_recovery_bubbles_scale),
 386        NULL,
 387};
 388
 389static struct attribute *snb_mem_events_attrs[] = {
 390        EVENT_PTR(mem_ld_snb),
 391        EVENT_PTR(mem_st_snb),
 392        NULL,
 393};
 394
 395static struct event_constraint intel_hsw_event_constraints[] = {
 396        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
 397        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
 398        FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
 399        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
 400        INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 401        INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 402        /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 403        INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
 404        /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 405        INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
 406        /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
 407        INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 408
 409        /*
 410         * When HT is off, these events can only run on the bottom 4 counters.
 411         * When HT is on, they are impacted by the HT bug and require EXCL access.
 412         */
 413        INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
 414        INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 415        INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 416        INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 417
 418        EVENT_CONSTRAINT_END
 419};
 420
 421static struct event_constraint intel_bdw_event_constraints[] = {
 422        FIXED_EVENT_CONSTRAINT(0x00c0, 0),      /* INST_RETIRED.ANY */
 423        FIXED_EVENT_CONSTRAINT(0x003c, 1),      /* CPU_CLK_UNHALTED.CORE */
 424        FIXED_EVENT_CONSTRAINT(0x0300, 2),      /* CPU_CLK_UNHALTED.REF */
 425        INTEL_UEVENT_CONSTRAINT(0x148, 0x4),    /* L1D_PEND_MISS.PENDING */
 426        INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),        /* CYCLE_ACTIVITY.CYCLES_L1D_MISS */
 427        /*
 428         * when HT is off, these can only run on the bottom 4 counters
 429         */
 430        INTEL_EVENT_CONSTRAINT(0xd0, 0xf),      /* MEM_INST_RETIRED.* */
 431        INTEL_EVENT_CONSTRAINT(0xd1, 0xf),      /* MEM_LOAD_RETIRED.* */
 432        INTEL_EVENT_CONSTRAINT(0xd2, 0xf),      /* MEM_LOAD_L3_HIT_RETIRED.* */
 433        INTEL_EVENT_CONSTRAINT(0xcd, 0xf),      /* MEM_TRANS_RETIRED.* */
 434        EVENT_CONSTRAINT_END
 435};
 436
 437static u64 intel_pmu_event_map(int hw_event)
 438{
 439        return intel_perfmon_event_map[hw_event];
 440}
 441
 442static __initconst const u64 spr_hw_cache_event_ids
 443                                [PERF_COUNT_HW_CACHE_MAX]
 444                                [PERF_COUNT_HW_CACHE_OP_MAX]
 445                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 446{
 447 [ C(L1D ) ] = {
 448        [ C(OP_READ) ] = {
 449                [ C(RESULT_ACCESS) ] = 0x81d0,
 450                [ C(RESULT_MISS)   ] = 0xe124,
 451        },
 452        [ C(OP_WRITE) ] = {
 453                [ C(RESULT_ACCESS) ] = 0x82d0,
 454        },
 455 },
 456 [ C(L1I ) ] = {
 457        [ C(OP_READ) ] = {
 458                [ C(RESULT_MISS)   ] = 0xe424,
 459        },
 460        [ C(OP_WRITE) ] = {
 461                [ C(RESULT_ACCESS) ] = -1,
 462                [ C(RESULT_MISS)   ] = -1,
 463        },
 464 },
 465 [ C(LL  ) ] = {
 466        [ C(OP_READ) ] = {
 467                [ C(RESULT_ACCESS) ] = 0x12a,
 468                [ C(RESULT_MISS)   ] = 0x12a,
 469        },
 470        [ C(OP_WRITE) ] = {
 471                [ C(RESULT_ACCESS) ] = 0x12a,
 472                [ C(RESULT_MISS)   ] = 0x12a,
 473        },
 474 },
 475 [ C(DTLB) ] = {
 476        [ C(OP_READ) ] = {
 477                [ C(RESULT_ACCESS) ] = 0x81d0,
 478                [ C(RESULT_MISS)   ] = 0xe12,
 479        },
 480        [ C(OP_WRITE) ] = {
 481                [ C(RESULT_ACCESS) ] = 0x82d0,
 482                [ C(RESULT_MISS)   ] = 0xe13,
 483        },
 484 },
 485 [ C(ITLB) ] = {
 486        [ C(OP_READ) ] = {
 487                [ C(RESULT_ACCESS) ] = -1,
 488                [ C(RESULT_MISS)   ] = 0xe11,
 489        },
 490        [ C(OP_WRITE) ] = {
 491                [ C(RESULT_ACCESS) ] = -1,
 492                [ C(RESULT_MISS)   ] = -1,
 493        },
 494        [ C(OP_PREFETCH) ] = {
 495                [ C(RESULT_ACCESS) ] = -1,
 496                [ C(RESULT_MISS)   ] = -1,
 497        },
 498 },
 499 [ C(BPU ) ] = {
 500        [ C(OP_READ) ] = {
 501                [ C(RESULT_ACCESS) ] = 0x4c4,
 502                [ C(RESULT_MISS)   ] = 0x4c5,
 503        },
 504        [ C(OP_WRITE) ] = {
 505                [ C(RESULT_ACCESS) ] = -1,
 506                [ C(RESULT_MISS)   ] = -1,
 507        },
 508        [ C(OP_PREFETCH) ] = {
 509                [ C(RESULT_ACCESS) ] = -1,
 510                [ C(RESULT_MISS)   ] = -1,
 511        },
 512 },
 513 [ C(NODE) ] = {
 514        [ C(OP_READ) ] = {
 515                [ C(RESULT_ACCESS) ] = 0x12a,
 516                [ C(RESULT_MISS)   ] = 0x12a,
 517        },
 518 },
 519};
 520
 521static __initconst const u64 spr_hw_cache_extra_regs
 522                                [PERF_COUNT_HW_CACHE_MAX]
 523                                [PERF_COUNT_HW_CACHE_OP_MAX]
 524                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 525{
 526 [ C(LL  ) ] = {
 527        [ C(OP_READ) ] = {
 528                [ C(RESULT_ACCESS) ] = 0x10001,
 529                [ C(RESULT_MISS)   ] = 0x3fbfc00001,
 530        },
 531        [ C(OP_WRITE) ] = {
 532                [ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
 533                [ C(RESULT_MISS)   ] = 0x3f3fc00002,
 534        },
 535 },
 536 [ C(NODE) ] = {
 537        [ C(OP_READ) ] = {
 538                [ C(RESULT_ACCESS) ] = 0x10c000001,
 539                [ C(RESULT_MISS)   ] = 0x3fb3000001,
 540        },
 541 },
 542};
 543
 544/*
 545 * Notes on the events:
 546 * - data reads do not include code reads (comparable to earlier tables)
 547 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 548 * - remote node access includes remote memory, remote cache, remote mmio.
 549 * - prefetches are not included in the counts.
 550 * - icache miss does not include decoded icache
 551 */
 552
 553#define SKL_DEMAND_DATA_RD              BIT_ULL(0)
 554#define SKL_DEMAND_RFO                  BIT_ULL(1)
 555#define SKL_ANY_RESPONSE                BIT_ULL(16)
 556#define SKL_SUPPLIER_NONE               BIT_ULL(17)
 557#define SKL_L3_MISS_LOCAL_DRAM          BIT_ULL(26)
 558#define SKL_L3_MISS_REMOTE_HOP0_DRAM    BIT_ULL(27)
 559#define SKL_L3_MISS_REMOTE_HOP1_DRAM    BIT_ULL(28)
 560#define SKL_L3_MISS_REMOTE_HOP2P_DRAM   BIT_ULL(29)
 561#define SKL_L3_MISS                     (SKL_L3_MISS_LOCAL_DRAM| \
 562                                         SKL_L3_MISS_REMOTE_HOP0_DRAM| \
 563                                         SKL_L3_MISS_REMOTE_HOP1_DRAM| \
 564                                         SKL_L3_MISS_REMOTE_HOP2P_DRAM)
 565#define SKL_SPL_HIT                     BIT_ULL(30)
 566#define SKL_SNOOP_NONE                  BIT_ULL(31)
 567#define SKL_SNOOP_NOT_NEEDED            BIT_ULL(32)
 568#define SKL_SNOOP_MISS                  BIT_ULL(33)
 569#define SKL_SNOOP_HIT_NO_FWD            BIT_ULL(34)
 570#define SKL_SNOOP_HIT_WITH_FWD          BIT_ULL(35)
 571#define SKL_SNOOP_HITM                  BIT_ULL(36)
 572#define SKL_SNOOP_NON_DRAM              BIT_ULL(37)
 573#define SKL_ANY_SNOOP                   (SKL_SPL_HIT|SKL_SNOOP_NONE| \
 574                                         SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
 575                                         SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
 576                                         SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
 577#define SKL_DEMAND_READ                 SKL_DEMAND_DATA_RD
 578#define SKL_SNOOP_DRAM                  (SKL_SNOOP_NONE| \
 579                                         SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
 580                                         SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
 581                                         SKL_SNOOP_HITM|SKL_SPL_HIT)
 582#define SKL_DEMAND_WRITE                SKL_DEMAND_RFO
 583#define SKL_LLC_ACCESS                  SKL_ANY_RESPONSE
 584#define SKL_L3_MISS_REMOTE              (SKL_L3_MISS_REMOTE_HOP0_DRAM| \
 585                                         SKL_L3_MISS_REMOTE_HOP1_DRAM| \
 586                                         SKL_L3_MISS_REMOTE_HOP2P_DRAM)
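/*
 * For illustration, the composites above expand to plain OFFCORE_RESPONSE
 * masks; e.g. the LL read "access" value used in skl_hw_cache_extra_regs
 * below is
 *
 *   SKL_DEMAND_READ | SKL_LLC_ACCESS | SKL_ANY_SNOOP
 *     = BIT_ULL(0) | BIT_ULL(16) | (0xffULL << 30)
 *     = 0x3fc0010001,
 *
 * which fits within the 0x3fffff8fff valid-bit mask declared in
 * intel_skl_extra_regs.
 */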
 587
 588static __initconst const u64 skl_hw_cache_event_ids
 589                                [PERF_COUNT_HW_CACHE_MAX]
 590                                [PERF_COUNT_HW_CACHE_OP_MAX]
 591                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 592{
 593 [ C(L1D ) ] = {
 594        [ C(OP_READ) ] = {
 595                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_INST_RETIRED.ALL_LOADS */
 596                [ C(RESULT_MISS)   ] = 0x151,   /* L1D.REPLACEMENT */
 597        },
 598        [ C(OP_WRITE) ] = {
 599                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_INST_RETIRED.ALL_STORES */
 600                [ C(RESULT_MISS)   ] = 0x0,
 601        },
 602        [ C(OP_PREFETCH) ] = {
 603                [ C(RESULT_ACCESS) ] = 0x0,
 604                [ C(RESULT_MISS)   ] = 0x0,
 605        },
 606 },
 607 [ C(L1I ) ] = {
 608        [ C(OP_READ) ] = {
 609                [ C(RESULT_ACCESS) ] = 0x0,
 610                [ C(RESULT_MISS)   ] = 0x283,   /* ICACHE_64B.MISS */
 611        },
 612        [ C(OP_WRITE) ] = {
 613                [ C(RESULT_ACCESS) ] = -1,
 614                [ C(RESULT_MISS)   ] = -1,
 615        },
 616        [ C(OP_PREFETCH) ] = {
 617                [ C(RESULT_ACCESS) ] = 0x0,
 618                [ C(RESULT_MISS)   ] = 0x0,
 619        },
 620 },
 621 [ C(LL  ) ] = {
 622        [ C(OP_READ) ] = {
 623                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 624                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 625        },
 626        [ C(OP_WRITE) ] = {
 627                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 628                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 629        },
 630        [ C(OP_PREFETCH) ] = {
 631                [ C(RESULT_ACCESS) ] = 0x0,
 632                [ C(RESULT_MISS)   ] = 0x0,
 633        },
 634 },
 635 [ C(DTLB) ] = {
 636        [ C(OP_READ) ] = {
 637                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_INST_RETIRED.ALL_LOADS */
 638                [ C(RESULT_MISS)   ] = 0xe08,   /* DTLB_LOAD_MISSES.WALK_COMPLETED */
 639        },
 640        [ C(OP_WRITE) ] = {
 641                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_INST_RETIRED.ALL_STORES */
 642                [ C(RESULT_MISS)   ] = 0xe49,   /* DTLB_STORE_MISSES.WALK_COMPLETED */
 643        },
 644        [ C(OP_PREFETCH) ] = {
 645                [ C(RESULT_ACCESS) ] = 0x0,
 646                [ C(RESULT_MISS)   ] = 0x0,
 647        },
 648 },
 649 [ C(ITLB) ] = {
 650        [ C(OP_READ) ] = {
 651                [ C(RESULT_ACCESS) ] = 0x2085,  /* ITLB_MISSES.STLB_HIT */
 652                [ C(RESULT_MISS)   ] = 0xe85,   /* ITLB_MISSES.WALK_COMPLETED */
 653        },
 654        [ C(OP_WRITE) ] = {
 655                [ C(RESULT_ACCESS) ] = -1,
 656                [ C(RESULT_MISS)   ] = -1,
 657        },
 658        [ C(OP_PREFETCH) ] = {
 659                [ C(RESULT_ACCESS) ] = -1,
 660                [ C(RESULT_MISS)   ] = -1,
 661        },
 662 },
 663 [ C(BPU ) ] = {
 664        [ C(OP_READ) ] = {
 665                [ C(RESULT_ACCESS) ] = 0xc4,    /* BR_INST_RETIRED.ALL_BRANCHES */
 666                [ C(RESULT_MISS)   ] = 0xc5,    /* BR_MISP_RETIRED.ALL_BRANCHES */
 667        },
 668        [ C(OP_WRITE) ] = {
 669                [ C(RESULT_ACCESS) ] = -1,
 670                [ C(RESULT_MISS)   ] = -1,
 671        },
 672        [ C(OP_PREFETCH) ] = {
 673                [ C(RESULT_ACCESS) ] = -1,
 674                [ C(RESULT_MISS)   ] = -1,
 675        },
 676 },
 677 [ C(NODE) ] = {
 678        [ C(OP_READ) ] = {
 679                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 680                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 681        },
 682        [ C(OP_WRITE) ] = {
 683                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
 684                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
 685        },
 686        [ C(OP_PREFETCH) ] = {
 687                [ C(RESULT_ACCESS) ] = 0x0,
 688                [ C(RESULT_MISS)   ] = 0x0,
 689        },
 690 },
 691};
 692
 693static __initconst const u64 skl_hw_cache_extra_regs
 694                                [PERF_COUNT_HW_CACHE_MAX]
 695                                [PERF_COUNT_HW_CACHE_OP_MAX]
 696                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 697{
 698 [ C(LL  ) ] = {
 699        [ C(OP_READ) ] = {
 700                [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
 701                                       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
 702                [ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
 703                                       SKL_L3_MISS|SKL_ANY_SNOOP|
 704                                       SKL_SUPPLIER_NONE,
 705        },
 706        [ C(OP_WRITE) ] = {
 707                [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
 708                                       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
 709                [ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
 710                                       SKL_L3_MISS|SKL_ANY_SNOOP|
 711                                       SKL_SUPPLIER_NONE,
 712        },
 713        [ C(OP_PREFETCH) ] = {
 714                [ C(RESULT_ACCESS) ] = 0x0,
 715                [ C(RESULT_MISS)   ] = 0x0,
 716        },
 717 },
 718 [ C(NODE) ] = {
 719        [ C(OP_READ) ] = {
 720                [ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
 721                                       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
 722                [ C(RESULT_MISS)   ] = SKL_DEMAND_READ|
 723                                       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
 724        },
 725        [ C(OP_WRITE) ] = {
 726                [ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
 727                                       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
 728                [ C(RESULT_MISS)   ] = SKL_DEMAND_WRITE|
 729                                       SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
 730        },
 731        [ C(OP_PREFETCH) ] = {
 732                [ C(RESULT_ACCESS) ] = 0x0,
 733                [ C(RESULT_MISS)   ] = 0x0,
 734        },
 735 },
 736};
 737
 738#define SNB_DMND_DATA_RD        (1ULL << 0)
 739#define SNB_DMND_RFO            (1ULL << 1)
 740#define SNB_DMND_IFETCH         (1ULL << 2)
 741#define SNB_DMND_WB             (1ULL << 3)
 742#define SNB_PF_DATA_RD          (1ULL << 4)
 743#define SNB_PF_RFO              (1ULL << 5)
 744#define SNB_PF_IFETCH           (1ULL << 6)
 745#define SNB_LLC_DATA_RD         (1ULL << 7)
 746#define SNB_LLC_RFO             (1ULL << 8)
 747#define SNB_LLC_IFETCH          (1ULL << 9)
 748#define SNB_BUS_LOCKS           (1ULL << 10)
 749#define SNB_STRM_ST             (1ULL << 11)
 750#define SNB_OTHER               (1ULL << 15)
 751#define SNB_RESP_ANY            (1ULL << 16)
 752#define SNB_NO_SUPP             (1ULL << 17)
 753#define SNB_LLC_HITM            (1ULL << 18)
 754#define SNB_LLC_HITE            (1ULL << 19)
 755#define SNB_LLC_HITS            (1ULL << 20)
 756#define SNB_LLC_HITF            (1ULL << 21)
 757#define SNB_LOCAL               (1ULL << 22)
 758#define SNB_REMOTE              (0xffULL << 23)
 759#define SNB_SNP_NONE            (1ULL << 31)
 760#define SNB_SNP_NOT_NEEDED      (1ULL << 32)
 761#define SNB_SNP_MISS            (1ULL << 33)
 762#define SNB_NO_FWD              (1ULL << 34)
 763#define SNB_SNP_FWD             (1ULL << 35)
 764#define SNB_HITM                (1ULL << 36)
 765#define SNB_NON_DRAM            (1ULL << 37)
 766
 767#define SNB_DMND_READ           (SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
 768#define SNB_DMND_WRITE          (SNB_DMND_RFO|SNB_LLC_RFO)
 769#define SNB_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)
 770
 771#define SNB_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
 772                                 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
 773                                 SNB_HITM)
 774
 775#define SNB_DRAM_ANY            (SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
 776#define SNB_DRAM_REMOTE         (SNB_REMOTE|SNB_SNP_ANY)
 777
 778#define SNB_L3_ACCESS           SNB_RESP_ANY
 779#define SNB_L3_MISS             (SNB_DRAM_ANY|SNB_NON_DRAM)
 780
 781static __initconst const u64 snb_hw_cache_extra_regs
 782                                [PERF_COUNT_HW_CACHE_MAX]
 783                                [PERF_COUNT_HW_CACHE_OP_MAX]
 784                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 785{
 786 [ C(LL  ) ] = {
 787        [ C(OP_READ) ] = {
 788                [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
 789                [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
 790        },
 791        [ C(OP_WRITE) ] = {
 792                [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
 793                [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
 794        },
 795        [ C(OP_PREFETCH) ] = {
 796                [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
 797                [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
 798        },
 799 },
 800 [ C(NODE) ] = {
 801        [ C(OP_READ) ] = {
 802                [ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
 803                [ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
 804        },
 805        [ C(OP_WRITE) ] = {
 806                [ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
 807                [ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
 808        },
 809        [ C(OP_PREFETCH) ] = {
 810                [ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
 811                [ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
 812        },
 813 },
 814};
 815
 816static __initconst const u64 snb_hw_cache_event_ids
 817                                [PERF_COUNT_HW_CACHE_MAX]
 818                                [PERF_COUNT_HW_CACHE_OP_MAX]
 819                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 820{
 821 [ C(L1D) ] = {
 822        [ C(OP_READ) ] = {
 823                [ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS        */
 824                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT              */
 825        },
 826        [ C(OP_WRITE) ] = {
 827                [ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES       */
 828                [ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT        */
 829        },
 830        [ C(OP_PREFETCH) ] = {
 831                [ C(RESULT_ACCESS) ] = 0x0,
 832                [ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS          */
 833        },
 834 },
 835 [ C(L1I ) ] = {
 836        [ C(OP_READ) ] = {
 837                [ C(RESULT_ACCESS) ] = 0x0,
 838                [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
 839        },
 840        [ C(OP_WRITE) ] = {
 841                [ C(RESULT_ACCESS) ] = -1,
 842                [ C(RESULT_MISS)   ] = -1,
 843        },
 844        [ C(OP_PREFETCH) ] = {
 845                [ C(RESULT_ACCESS) ] = 0x0,
 846                [ C(RESULT_MISS)   ] = 0x0,
 847        },
 848 },
 849 [ C(LL  ) ] = {
 850        [ C(OP_READ) ] = {
 851                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
 852                [ C(RESULT_ACCESS) ] = 0x01b7,
 853                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
 854                [ C(RESULT_MISS)   ] = 0x01b7,
 855        },
 856        [ C(OP_WRITE) ] = {
 857                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
 858                [ C(RESULT_ACCESS) ] = 0x01b7,
 859                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
 860                [ C(RESULT_MISS)   ] = 0x01b7,
 861        },
 862        [ C(OP_PREFETCH) ] = {
 863                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
 864                [ C(RESULT_ACCESS) ] = 0x01b7,
 865                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
 866                [ C(RESULT_MISS)   ] = 0x01b7,
 867        },
 868 },
 869 [ C(DTLB) ] = {
 870        [ C(OP_READ) ] = {
 871                [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
 872                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
 873        },
 874        [ C(OP_WRITE) ] = {
 875                [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
 876                [ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
 877        },
 878        [ C(OP_PREFETCH) ] = {
 879                [ C(RESULT_ACCESS) ] = 0x0,
 880                [ C(RESULT_MISS)   ] = 0x0,
 881        },
 882 },
 883 [ C(ITLB) ] = {
 884        [ C(OP_READ) ] = {
 885                [ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT         */
 886                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK    */
 887        },
 888        [ C(OP_WRITE) ] = {
 889                [ C(RESULT_ACCESS) ] = -1,
 890                [ C(RESULT_MISS)   ] = -1,
 891        },
 892        [ C(OP_PREFETCH) ] = {
 893                [ C(RESULT_ACCESS) ] = -1,
 894                [ C(RESULT_MISS)   ] = -1,
 895        },
 896 },
 897 [ C(BPU ) ] = {
 898        [ C(OP_READ) ] = {
 899                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
 900                [ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
 901        },
 902        [ C(OP_WRITE) ] = {
 903                [ C(RESULT_ACCESS) ] = -1,
 904                [ C(RESULT_MISS)   ] = -1,
 905        },
 906        [ C(OP_PREFETCH) ] = {
 907                [ C(RESULT_ACCESS) ] = -1,
 908                [ C(RESULT_MISS)   ] = -1,
 909        },
 910 },
 911 [ C(NODE) ] = {
 912        [ C(OP_READ) ] = {
 913                [ C(RESULT_ACCESS) ] = 0x01b7,
 914                [ C(RESULT_MISS)   ] = 0x01b7,
 915        },
 916        [ C(OP_WRITE) ] = {
 917                [ C(RESULT_ACCESS) ] = 0x01b7,
 918                [ C(RESULT_MISS)   ] = 0x01b7,
 919        },
 920        [ C(OP_PREFETCH) ] = {
 921                [ C(RESULT_ACCESS) ] = 0x01b7,
 922                [ C(RESULT_MISS)   ] = 0x01b7,
 923        },
 924 },
 925
 926};
 927
 928/*
 929 * Notes on the events:
 930 * - data reads do not include code reads (comparable to earlier tables)
 931 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 932 * - remote node access includes remote memory, remote cache, remote mmio.
 933 * - prefetches are not included in the counts because they are not
 934 *   reliably counted.
 935 */
 936
 937#define HSW_DEMAND_DATA_RD              BIT_ULL(0)
 938#define HSW_DEMAND_RFO                  BIT_ULL(1)
 939#define HSW_ANY_RESPONSE                BIT_ULL(16)
 940#define HSW_SUPPLIER_NONE               BIT_ULL(17)
 941#define HSW_L3_MISS_LOCAL_DRAM          BIT_ULL(22)
 942#define HSW_L3_MISS_REMOTE_HOP0         BIT_ULL(27)
 943#define HSW_L3_MISS_REMOTE_HOP1         BIT_ULL(28)
 944#define HSW_L3_MISS_REMOTE_HOP2P        BIT_ULL(29)
 945#define HSW_L3_MISS                     (HSW_L3_MISS_LOCAL_DRAM| \
 946                                         HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
 947                                         HSW_L3_MISS_REMOTE_HOP2P)
 948#define HSW_SNOOP_NONE                  BIT_ULL(31)
 949#define HSW_SNOOP_NOT_NEEDED            BIT_ULL(32)
 950#define HSW_SNOOP_MISS                  BIT_ULL(33)
 951#define HSW_SNOOP_HIT_NO_FWD            BIT_ULL(34)
 952#define HSW_SNOOP_HIT_WITH_FWD          BIT_ULL(35)
 953#define HSW_SNOOP_HITM                  BIT_ULL(36)
 954#define HSW_SNOOP_NON_DRAM              BIT_ULL(37)
 955#define HSW_ANY_SNOOP                   (HSW_SNOOP_NONE| \
 956                                         HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
 957                                         HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
 958                                         HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
 959#define HSW_SNOOP_DRAM                  (HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
 960#define HSW_DEMAND_READ                 HSW_DEMAND_DATA_RD
 961#define HSW_DEMAND_WRITE                HSW_DEMAND_RFO
 962#define HSW_L3_MISS_REMOTE              (HSW_L3_MISS_REMOTE_HOP0|\
 963                                         HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
 964#define HSW_LLC_ACCESS                  HSW_ANY_RESPONSE
 965
 966#define BDW_L3_MISS_LOCAL               BIT(26)
 967#define BDW_L3_MISS                     (BDW_L3_MISS_LOCAL| \
 968                                         HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
 969                                         HSW_L3_MISS_REMOTE_HOP2P)
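/*
 * Illustrative note: Broadwell reports the local-DRAM supplier on a
 * different bit, hence BDW_L3_MISS_LOCAL is BIT(26) rather than Haswell's
 * BIT(22); numerically BDW_L3_MISS = BIT(26)|BIT(27)|BIT(28)|BIT(29)
 * = 0x3c000000.
 */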
 970
 971
 972static __initconst const u64 hsw_hw_cache_event_ids
 973                                [PERF_COUNT_HW_CACHE_MAX]
 974                                [PERF_COUNT_HW_CACHE_OP_MAX]
 975                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
 976{
 977 [ C(L1D ) ] = {
 978        [ C(OP_READ) ] = {
 979                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
 980                [ C(RESULT_MISS)   ] = 0x151,   /* L1D.REPLACEMENT */
 981        },
 982        [ C(OP_WRITE) ] = {
 983                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
 984                [ C(RESULT_MISS)   ] = 0x0,
 985        },
 986        [ C(OP_PREFETCH) ] = {
 987                [ C(RESULT_ACCESS) ] = 0x0,
 988                [ C(RESULT_MISS)   ] = 0x0,
 989        },
 990 },
 991 [ C(L1I ) ] = {
 992        [ C(OP_READ) ] = {
 993                [ C(RESULT_ACCESS) ] = 0x0,
 994                [ C(RESULT_MISS)   ] = 0x280,   /* ICACHE.MISSES */
 995        },
 996        [ C(OP_WRITE) ] = {
 997                [ C(RESULT_ACCESS) ] = -1,
 998                [ C(RESULT_MISS)   ] = -1,
 999        },
1000        [ C(OP_PREFETCH) ] = {
1001                [ C(RESULT_ACCESS) ] = 0x0,
1002                [ C(RESULT_MISS)   ] = 0x0,
1003        },
1004 },
1005 [ C(LL  ) ] = {
1006        [ C(OP_READ) ] = {
1007                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
1008                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
1009        },
1010        [ C(OP_WRITE) ] = {
1011                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
1012                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
1013        },
1014        [ C(OP_PREFETCH) ] = {
1015                [ C(RESULT_ACCESS) ] = 0x0,
1016                [ C(RESULT_MISS)   ] = 0x0,
1017        },
1018 },
1019 [ C(DTLB) ] = {
1020        [ C(OP_READ) ] = {
1021                [ C(RESULT_ACCESS) ] = 0x81d0,  /* MEM_UOPS_RETIRED.ALL_LOADS */
1022                [ C(RESULT_MISS)   ] = 0x108,   /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
1023        },
1024        [ C(OP_WRITE) ] = {
1025                [ C(RESULT_ACCESS) ] = 0x82d0,  /* MEM_UOPS_RETIRED.ALL_STORES */
1026                [ C(RESULT_MISS)   ] = 0x149,   /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
1027        },
1028        [ C(OP_PREFETCH) ] = {
1029                [ C(RESULT_ACCESS) ] = 0x0,
1030                [ C(RESULT_MISS)   ] = 0x0,
1031        },
1032 },
1033 [ C(ITLB) ] = {
1034        [ C(OP_READ) ] = {
1035                [ C(RESULT_ACCESS) ] = 0x6085,  /* ITLB_MISSES.STLB_HIT */
1036                [ C(RESULT_MISS)   ] = 0x185,   /* ITLB_MISSES.MISS_CAUSES_A_WALK */
1037        },
1038        [ C(OP_WRITE) ] = {
1039                [ C(RESULT_ACCESS) ] = -1,
1040                [ C(RESULT_MISS)   ] = -1,
1041        },
1042        [ C(OP_PREFETCH) ] = {
1043                [ C(RESULT_ACCESS) ] = -1,
1044                [ C(RESULT_MISS)   ] = -1,
1045        },
1046 },
1047 [ C(BPU ) ] = {
1048        [ C(OP_READ) ] = {
1049                [ C(RESULT_ACCESS) ] = 0xc4,    /* BR_INST_RETIRED.ALL_BRANCHES */
1050                [ C(RESULT_MISS)   ] = 0xc5,    /* BR_MISP_RETIRED.ALL_BRANCHES */
1051        },
1052        [ C(OP_WRITE) ] = {
1053                [ C(RESULT_ACCESS) ] = -1,
1054                [ C(RESULT_MISS)   ] = -1,
1055        },
1056        [ C(OP_PREFETCH) ] = {
1057                [ C(RESULT_ACCESS) ] = -1,
1058                [ C(RESULT_MISS)   ] = -1,
1059        },
1060 },
1061 [ C(NODE) ] = {
1062        [ C(OP_READ) ] = {
1063                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
1064                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
1065        },
1066        [ C(OP_WRITE) ] = {
1067                [ C(RESULT_ACCESS) ] = 0x1b7,   /* OFFCORE_RESPONSE */
1068                [ C(RESULT_MISS)   ] = 0x1b7,   /* OFFCORE_RESPONSE */
1069        },
1070        [ C(OP_PREFETCH) ] = {
1071                [ C(RESULT_ACCESS) ] = 0x0,
1072                [ C(RESULT_MISS)   ] = 0x0,
1073        },
1074 },
1075};
1076
1077static __initconst const u64 hsw_hw_cache_extra_regs
1078                                [PERF_COUNT_HW_CACHE_MAX]
1079                                [PERF_COUNT_HW_CACHE_OP_MAX]
1080                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1081{
1082 [ C(LL  ) ] = {
1083        [ C(OP_READ) ] = {
1084                [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1085                                       HSW_LLC_ACCESS,
1086                [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
1087                                       HSW_L3_MISS|HSW_ANY_SNOOP,
1088        },
1089        [ C(OP_WRITE) ] = {
1090                [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1091                                       HSW_LLC_ACCESS,
1092                [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
1093                                       HSW_L3_MISS|HSW_ANY_SNOOP,
1094        },
1095        [ C(OP_PREFETCH) ] = {
1096                [ C(RESULT_ACCESS) ] = 0x0,
1097                [ C(RESULT_MISS)   ] = 0x0,
1098        },
1099 },
1100 [ C(NODE) ] = {
1101        [ C(OP_READ) ] = {
1102                [ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
1103                                       HSW_L3_MISS_LOCAL_DRAM|
1104                                       HSW_SNOOP_DRAM,
1105                [ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
1106                                       HSW_L3_MISS_REMOTE|
1107                                       HSW_SNOOP_DRAM,
1108        },
1109        [ C(OP_WRITE) ] = {
1110                [ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
1111                                       HSW_L3_MISS_LOCAL_DRAM|
1112                                       HSW_SNOOP_DRAM,
1113                [ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
1114                                       HSW_L3_MISS_REMOTE|
1115                                       HSW_SNOOP_DRAM,
1116        },
1117        [ C(OP_PREFETCH) ] = {
1118                [ C(RESULT_ACCESS) ] = 0x0,
1119                [ C(RESULT_MISS)   ] = 0x0,
1120        },
1121 },
1122};
1123
1124static __initconst const u64 westmere_hw_cache_event_ids
1125                                [PERF_COUNT_HW_CACHE_MAX]
1126                                [PERF_COUNT_HW_CACHE_OP_MAX]
1127                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1128{
1129 [ C(L1D) ] = {
1130        [ C(OP_READ) ] = {
1131                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1132                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
1133        },
1134        [ C(OP_WRITE) ] = {
1135                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
1136                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
1137        },
1138        [ C(OP_PREFETCH) ] = {
1139                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
1140                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
1141        },
1142 },
1143 [ C(L1I ) ] = {
1144        [ C(OP_READ) ] = {
1145                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
1146                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
1147        },
1148        [ C(OP_WRITE) ] = {
1149                [ C(RESULT_ACCESS) ] = -1,
1150                [ C(RESULT_MISS)   ] = -1,
1151        },
1152        [ C(OP_PREFETCH) ] = {
1153                [ C(RESULT_ACCESS) ] = 0x0,
1154                [ C(RESULT_MISS)   ] = 0x0,
1155        },
1156 },
1157 [ C(LL  ) ] = {
1158        [ C(OP_READ) ] = {
1159                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1160                [ C(RESULT_ACCESS) ] = 0x01b7,
1161                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1162                [ C(RESULT_MISS)   ] = 0x01b7,
1163        },
1164        /*
1165         * Use RFO, not WRITEBACK, because a write miss would typically occur
1166         * on RFO.
1167         */
1168        [ C(OP_WRITE) ] = {
1169                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1170                [ C(RESULT_ACCESS) ] = 0x01b7,
1171                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1172                [ C(RESULT_MISS)   ] = 0x01b7,
1173        },
1174        [ C(OP_PREFETCH) ] = {
1175                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1176                [ C(RESULT_ACCESS) ] = 0x01b7,
1177                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1178                [ C(RESULT_MISS)   ] = 0x01b7,
1179        },
1180 },
1181 [ C(DTLB) ] = {
1182        [ C(OP_READ) ] = {
1183                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1184                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
1185        },
1186        [ C(OP_WRITE) ] = {
1187                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
1188                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
1189        },
1190        [ C(OP_PREFETCH) ] = {
1191                [ C(RESULT_ACCESS) ] = 0x0,
1192                [ C(RESULT_MISS)   ] = 0x0,
1193        },
1194 },
1195 [ C(ITLB) ] = {
1196        [ C(OP_READ) ] = {
1197                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
1198                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
1199        },
1200        [ C(OP_WRITE) ] = {
1201                [ C(RESULT_ACCESS) ] = -1,
1202                [ C(RESULT_MISS)   ] = -1,
1203        },
1204        [ C(OP_PREFETCH) ] = {
1205                [ C(RESULT_ACCESS) ] = -1,
1206                [ C(RESULT_MISS)   ] = -1,
1207        },
1208 },
1209 [ C(BPU ) ] = {
1210        [ C(OP_READ) ] = {
1211                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1212                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
1213        },
1214        [ C(OP_WRITE) ] = {
1215                [ C(RESULT_ACCESS) ] = -1,
1216                [ C(RESULT_MISS)   ] = -1,
1217        },
1218        [ C(OP_PREFETCH) ] = {
1219                [ C(RESULT_ACCESS) ] = -1,
1220                [ C(RESULT_MISS)   ] = -1,
1221        },
1222 },
1223 [ C(NODE) ] = {
1224        [ C(OP_READ) ] = {
1225                [ C(RESULT_ACCESS) ] = 0x01b7,
1226                [ C(RESULT_MISS)   ] = 0x01b7,
1227        },
1228        [ C(OP_WRITE) ] = {
1229                [ C(RESULT_ACCESS) ] = 0x01b7,
1230                [ C(RESULT_MISS)   ] = 0x01b7,
1231        },
1232        [ C(OP_PREFETCH) ] = {
1233                [ C(RESULT_ACCESS) ] = 0x01b7,
1234                [ C(RESULT_MISS)   ] = 0x01b7,
1235        },
1236 },
1237};
1238
1239/*
1240 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
1241 * See IA32 SDM Vol 3B 30.6.1.3
1242 */
1243
1244#define NHM_DMND_DATA_RD        (1 << 0)
1245#define NHM_DMND_RFO            (1 << 1)
1246#define NHM_DMND_IFETCH         (1 << 2)
1247#define NHM_DMND_WB             (1 << 3)
1248#define NHM_PF_DATA_RD          (1 << 4)
1249#define NHM_PF_DATA_RFO         (1 << 5)
1250#define NHM_PF_IFETCH           (1 << 6)
1251#define NHM_OFFCORE_OTHER       (1 << 7)
1252#define NHM_UNCORE_HIT          (1 << 8)
1253#define NHM_OTHER_CORE_HIT_SNP  (1 << 9)
1254#define NHM_OTHER_CORE_HITM     (1 << 10)
1255                                /* reserved */
1256#define NHM_REMOTE_CACHE_FWD    (1 << 12)
1257#define NHM_REMOTE_DRAM         (1 << 13)
1258#define NHM_LOCAL_DRAM          (1 << 14)
1259#define NHM_NON_DRAM            (1 << 15)
1260
1261#define NHM_LOCAL               (NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
1262#define NHM_REMOTE              (NHM_REMOTE_DRAM)
1263
1264#define NHM_DMND_READ           (NHM_DMND_DATA_RD)
1265#define NHM_DMND_WRITE          (NHM_DMND_RFO|NHM_DMND_WB)
1266#define NHM_DMND_PREFETCH       (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
1267
1268#define NHM_L3_HIT      (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
1269#define NHM_L3_MISS     (NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
1270#define NHM_L3_ACCESS   (NHM_L3_HIT|NHM_L3_MISS)
1271
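/*
 * Worked example (illustrative, not from the original sources): in the
 * extra-regs table below, an LL read miss is counted with the generic
 * OFFCORE_RESPONSE event 0x01b7 from the event-ids table, while the
 * offcore response MSR is programmed to NHM_DMND_READ|NHM_L3_MISS, i.e.
 * NHM_DMND_DATA_RD | NHM_REMOTE_CACHE_FWD | NHM_REMOTE_DRAM |
 * NHM_LOCAL_DRAM | NHM_NON_DRAM = 0xf001.
 */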
1272static __initconst const u64 nehalem_hw_cache_extra_regs
1273                                [PERF_COUNT_HW_CACHE_MAX]
1274                                [PERF_COUNT_HW_CACHE_OP_MAX]
1275                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1276{
1277 [ C(LL  ) ] = {
1278        [ C(OP_READ) ] = {
1279                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
1280                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
1281        },
1282        [ C(OP_WRITE) ] = {
1283                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
1284                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
1285        },
1286        [ C(OP_PREFETCH) ] = {
1287                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
1288                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
1289        },
1290 },
1291 [ C(NODE) ] = {
1292        [ C(OP_READ) ] = {
1293                [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
1294                [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
1295        },
1296        [ C(OP_WRITE) ] = {
1297                [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
1298                [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
1299        },
1300        [ C(OP_PREFETCH) ] = {
1301                [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
1302                [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
1303        },
1304 },
1305};
1306
1307static __initconst const u64 nehalem_hw_cache_event_ids
1308                                [PERF_COUNT_HW_CACHE_MAX]
1309                                [PERF_COUNT_HW_CACHE_OP_MAX]
1310                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1311{
1312 [ C(L1D) ] = {
1313        [ C(OP_READ) ] = {
1314                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
1315                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
1316        },
1317        [ C(OP_WRITE) ] = {
1318                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
1319                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
1320        },
1321        [ C(OP_PREFETCH) ] = {
1322                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
1323                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
1324        },
1325 },
1326 [ C(L1I ) ] = {
1327        [ C(OP_READ) ] = {
1328                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
1329                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
1330        },
1331        [ C(OP_WRITE) ] = {
1332                [ C(RESULT_ACCESS) ] = -1,
1333                [ C(RESULT_MISS)   ] = -1,
1334        },
1335        [ C(OP_PREFETCH) ] = {
1336                [ C(RESULT_ACCESS) ] = 0x0,
1337                [ C(RESULT_MISS)   ] = 0x0,
1338        },
1339 },
1340 [ C(LL  ) ] = {
1341        [ C(OP_READ) ] = {
1342                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1343                [ C(RESULT_ACCESS) ] = 0x01b7,
1344                /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
1345                [ C(RESULT_MISS)   ] = 0x01b7,
1346        },
1347        /*
1348         * Use RFO, not WRITEBACK, because a write miss would typically occur
1349         * on RFO.
1350         */
1351        [ C(OP_WRITE) ] = {
1352                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1353                [ C(RESULT_ACCESS) ] = 0x01b7,
1354                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1355                [ C(RESULT_MISS)   ] = 0x01b7,
1356        },
1357        [ C(OP_PREFETCH) ] = {
1358                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1359                [ C(RESULT_ACCESS) ] = 0x01b7,
1360                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1361                [ C(RESULT_MISS)   ] = 0x01b7,
1362        },
1363 },
1364 [ C(DTLB) ] = {
1365        [ C(OP_READ) ] = {
1366                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
1367                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
1368        },
1369        [ C(OP_WRITE) ] = {
1370                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
1371                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
1372        },
1373        [ C(OP_PREFETCH) ] = {
1374                [ C(RESULT_ACCESS) ] = 0x0,
1375                [ C(RESULT_MISS)   ] = 0x0,
1376        },
1377 },
1378 [ C(ITLB) ] = {
1379        [ C(OP_READ) ] = {
1380                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
1381                [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
1382        },
1383        [ C(OP_WRITE) ] = {
1384                [ C(RESULT_ACCESS) ] = -1,
1385                [ C(RESULT_MISS)   ] = -1,
1386        },
1387        [ C(OP_PREFETCH) ] = {
1388                [ C(RESULT_ACCESS) ] = -1,
1389                [ C(RESULT_MISS)   ] = -1,
1390        },
1391 },
1392 [ C(BPU ) ] = {
1393        [ C(OP_READ) ] = {
1394                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
1395                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
1396        },
1397        [ C(OP_WRITE) ] = {
1398                [ C(RESULT_ACCESS) ] = -1,
1399                [ C(RESULT_MISS)   ] = -1,
1400        },
1401        [ C(OP_PREFETCH) ] = {
1402                [ C(RESULT_ACCESS) ] = -1,
1403                [ C(RESULT_MISS)   ] = -1,
1404        },
1405 },
1406 [ C(NODE) ] = {
1407        [ C(OP_READ) ] = {
1408                [ C(RESULT_ACCESS) ] = 0x01b7,
1409                [ C(RESULT_MISS)   ] = 0x01b7,
1410        },
1411        [ C(OP_WRITE) ] = {
1412                [ C(RESULT_ACCESS) ] = 0x01b7,
1413                [ C(RESULT_MISS)   ] = 0x01b7,
1414        },
1415        [ C(OP_PREFETCH) ] = {
1416                [ C(RESULT_ACCESS) ] = 0x01b7,
1417                [ C(RESULT_MISS)   ] = 0x01b7,
1418        },
1419 },
1420};
1421
1422static __initconst const u64 core2_hw_cache_event_ids
1423                                [PERF_COUNT_HW_CACHE_MAX]
1424                                [PERF_COUNT_HW_CACHE_OP_MAX]
1425                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1426{
1427 [ C(L1D) ] = {
1428        [ C(OP_READ) ] = {
1429                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
1430                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
1431        },
1432        [ C(OP_WRITE) ] = {
1433                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
1434                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
1435        },
1436        [ C(OP_PREFETCH) ] = {
1437                [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
1438                [ C(RESULT_MISS)   ] = 0,
1439        },
1440 },
1441 [ C(L1I ) ] = {
1442        [ C(OP_READ) ] = {
1443                [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
1444                [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
1445        },
1446        [ C(OP_WRITE) ] = {
1447                [ C(RESULT_ACCESS) ] = -1,
1448                [ C(RESULT_MISS)   ] = -1,
1449        },
1450        [ C(OP_PREFETCH) ] = {
1451                [ C(RESULT_ACCESS) ] = 0,
1452                [ C(RESULT_MISS)   ] = 0,
1453        },
1454 },
1455 [ C(LL  ) ] = {
1456        [ C(OP_READ) ] = {
1457                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1458                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1459        },
1460        [ C(OP_WRITE) ] = {
1461                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
1462                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
1463        },
1464        [ C(OP_PREFETCH) ] = {
1465                [ C(RESULT_ACCESS) ] = 0,
1466                [ C(RESULT_MISS)   ] = 0,
1467        },
1468 },
1469 [ C(DTLB) ] = {
1470        [ C(OP_READ) ] = {
1471                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
1472                [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
1473        },
1474        [ C(OP_WRITE) ] = {
1475                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
1476                [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
1477        },
1478        [ C(OP_PREFETCH) ] = {
1479                [ C(RESULT_ACCESS) ] = 0,
1480                [ C(RESULT_MISS)   ] = 0,
1481        },
1482 },
1483 [ C(ITLB) ] = {
1484        [ C(OP_READ) ] = {
1485                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1486                [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
1487        },
1488        [ C(OP_WRITE) ] = {
1489                [ C(RESULT_ACCESS) ] = -1,
1490                [ C(RESULT_MISS)   ] = -1,
1491        },
1492        [ C(OP_PREFETCH) ] = {
1493                [ C(RESULT_ACCESS) ] = -1,
1494                [ C(RESULT_MISS)   ] = -1,
1495        },
1496 },
1497 [ C(BPU ) ] = {
1498        [ C(OP_READ) ] = {
1499                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1500                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1501        },
1502        [ C(OP_WRITE) ] = {
1503                [ C(RESULT_ACCESS) ] = -1,
1504                [ C(RESULT_MISS)   ] = -1,
1505        },
1506        [ C(OP_PREFETCH) ] = {
1507                [ C(RESULT_ACCESS) ] = -1,
1508                [ C(RESULT_MISS)   ] = -1,
1509        },
1510 },
1511};
1512
1513static __initconst const u64 atom_hw_cache_event_ids
1514                                [PERF_COUNT_HW_CACHE_MAX]
1515                                [PERF_COUNT_HW_CACHE_OP_MAX]
1516                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1517{
1518 [ C(L1D) ] = {
1519        [ C(OP_READ) ] = {
1520                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
1521                [ C(RESULT_MISS)   ] = 0,
1522        },
1523        [ C(OP_WRITE) ] = {
1524                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
1525                [ C(RESULT_MISS)   ] = 0,
1526        },
1527        [ C(OP_PREFETCH) ] = {
1528                [ C(RESULT_ACCESS) ] = 0x0,
1529                [ C(RESULT_MISS)   ] = 0,
1530        },
1531 },
1532 [ C(L1I ) ] = {
1533        [ C(OP_READ) ] = {
1534                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
1535                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
1536        },
1537        [ C(OP_WRITE) ] = {
1538                [ C(RESULT_ACCESS) ] = -1,
1539                [ C(RESULT_MISS)   ] = -1,
1540        },
1541        [ C(OP_PREFETCH) ] = {
1542                [ C(RESULT_ACCESS) ] = 0,
1543                [ C(RESULT_MISS)   ] = 0,
1544        },
1545 },
1546 [ C(LL  ) ] = {
1547        [ C(OP_READ) ] = {
1548                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
1549                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
1550        },
1551        [ C(OP_WRITE) ] = {
1552                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
1553                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
1554        },
1555        [ C(OP_PREFETCH) ] = {
1556                [ C(RESULT_ACCESS) ] = 0,
1557                [ C(RESULT_MISS)   ] = 0,
1558        },
1559 },
1560 [ C(DTLB) ] = {
1561        [ C(OP_READ) ] = {
1562                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
1563                [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
1564        },
1565        [ C(OP_WRITE) ] = {
1566                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
1567                [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
1568        },
1569        [ C(OP_PREFETCH) ] = {
1570                [ C(RESULT_ACCESS) ] = 0,
1571                [ C(RESULT_MISS)   ] = 0,
1572        },
1573 },
1574 [ C(ITLB) ] = {
1575        [ C(OP_READ) ] = {
1576                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
1577                [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
1578        },
1579        [ C(OP_WRITE) ] = {
1580                [ C(RESULT_ACCESS) ] = -1,
1581                [ C(RESULT_MISS)   ] = -1,
1582        },
1583        [ C(OP_PREFETCH) ] = {
1584                [ C(RESULT_ACCESS) ] = -1,
1585                [ C(RESULT_MISS)   ] = -1,
1586        },
1587 },
1588 [ C(BPU ) ] = {
1589        [ C(OP_READ) ] = {
1590                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
1591                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
1592        },
1593        [ C(OP_WRITE) ] = {
1594                [ C(RESULT_ACCESS) ] = -1,
1595                [ C(RESULT_MISS)   ] = -1,
1596        },
1597        [ C(OP_PREFETCH) ] = {
1598                [ C(RESULT_ACCESS) ] = -1,
1599                [ C(RESULT_MISS)   ] = -1,
1600        },
1601 },
1602};
1603
1604EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
1605EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
1606/* no_alloc_cycles.not_delivered */
1607EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
1608               "event=0xca,umask=0x50");
1609EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
1610/* uops_retired.all */
1611EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
1612               "event=0xc2,umask=0x10");
1613/* uops_retired.all */
1614EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
1615               "event=0xc2,umask=0x10");
1616
1617static struct attribute *slm_events_attrs[] = {
1618        EVENT_PTR(td_total_slots_slm),
1619        EVENT_PTR(td_total_slots_scale_slm),
1620        EVENT_PTR(td_fetch_bubbles_slm),
1621        EVENT_PTR(td_fetch_bubbles_scale_slm),
1622        EVENT_PTR(td_slots_issued_slm),
1623        EVENT_PTR(td_slots_retired_slm),
1624        NULL
1625};
1626
1627static struct extra_reg intel_slm_extra_regs[] __read_mostly =
1628{
1629        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1630        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
1631        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
1632        EVENT_EXTRA_END
1633};
1634
1635#define SLM_DMND_READ           SNB_DMND_DATA_RD
1636#define SLM_DMND_WRITE          SNB_DMND_RFO
1637#define SLM_DMND_PREFETCH       (SNB_PF_DATA_RD|SNB_PF_RFO)
1638
1639#define SLM_SNP_ANY             (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
1640#define SLM_LLC_ACCESS          SNB_RESP_ANY
1641#define SLM_LLC_MISS            (SLM_SNP_ANY|SNB_NON_DRAM)
1642
1643static __initconst const u64 slm_hw_cache_extra_regs
1644                                [PERF_COUNT_HW_CACHE_MAX]
1645                                [PERF_COUNT_HW_CACHE_OP_MAX]
1646                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1647{
1648 [ C(LL  ) ] = {
1649        [ C(OP_READ) ] = {
1650                [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
1651                [ C(RESULT_MISS)   ] = 0,
1652        },
1653        [ C(OP_WRITE) ] = {
1654                [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
1655                [ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
1656        },
1657        [ C(OP_PREFETCH) ] = {
1658                [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
1659                [ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
1660        },
1661 },
1662};
1663
1664static __initconst const u64 slm_hw_cache_event_ids
1665                                [PERF_COUNT_HW_CACHE_MAX]
1666                                [PERF_COUNT_HW_CACHE_OP_MAX]
1667                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
1668{
1669 [ C(L1D) ] = {
1670        [ C(OP_READ) ] = {
1671                [ C(RESULT_ACCESS) ] = 0,
1672                [ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
1673        },
1674        [ C(OP_WRITE) ] = {
1675                [ C(RESULT_ACCESS) ] = 0,
1676                [ C(RESULT_MISS)   ] = 0,
1677        },
1678        [ C(OP_PREFETCH) ] = {
1679                [ C(RESULT_ACCESS) ] = 0,
1680                [ C(RESULT_MISS)   ] = 0,
1681        },
1682 },
1683 [ C(L1I ) ] = {
1684        [ C(OP_READ) ] = {
1685                [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
1686                [ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
1687        },
1688        [ C(OP_WRITE) ] = {
1689                [ C(RESULT_ACCESS) ] = -1,
1690                [ C(RESULT_MISS)   ] = -1,
1691        },
1692        [ C(OP_PREFETCH) ] = {
1693                [ C(RESULT_ACCESS) ] = 0,
1694                [ C(RESULT_MISS)   ] = 0,
1695        },
1696 },
1697 [ C(LL  ) ] = {
1698        [ C(OP_READ) ] = {
1699                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
1700                [ C(RESULT_ACCESS) ] = 0x01b7,
1701                [ C(RESULT_MISS)   ] = 0,
1702        },
1703        [ C(OP_WRITE) ] = {
1704                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
1705                [ C(RESULT_ACCESS) ] = 0x01b7,
1706                /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
1707                [ C(RESULT_MISS)   ] = 0x01b7,
1708        },
1709        [ C(OP_PREFETCH) ] = {
1710                /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
1711                [ C(RESULT_ACCESS) ] = 0x01b7,
1712                /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
1713                [ C(RESULT_MISS)   ] = 0x01b7,
1714        },
1715 },
1716 [ C(DTLB) ] = {
1717        [ C(OP_READ) ] = {
1718                [ C(RESULT_ACCESS) ] = 0,
1719                [ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
1720        },
1721        [ C(OP_WRITE) ] = {
1722                [ C(RESULT_ACCESS) ] = 0,
1723                [ C(RESULT_MISS)   ] = 0,
1724        },
1725        [ C(OP_PREFETCH) ] = {
1726                [ C(RESULT_ACCESS) ] = 0,
1727                [ C(RESULT_MISS)   ] = 0,
1728        },
1729 },
1730 [ C(ITLB) ] = {
1731        [ C(OP_READ) ] = {
1732                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1733                [ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
1734        },
1735        [ C(OP_WRITE) ] = {
1736                [ C(RESULT_ACCESS) ] = -1,
1737                [ C(RESULT_MISS)   ] = -1,
1738        },
1739        [ C(OP_PREFETCH) ] = {
1740                [ C(RESULT_ACCESS) ] = -1,
1741                [ C(RESULT_MISS)   ] = -1,
1742        },
1743 },
1744 [ C(BPU ) ] = {
1745        [ C(OP_READ) ] = {
1746                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1747                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1748        },
1749        [ C(OP_WRITE) ] = {
1750                [ C(RESULT_ACCESS) ] = -1,
1751                [ C(RESULT_MISS)   ] = -1,
1752        },
1753        [ C(OP_PREFETCH) ] = {
1754                [ C(RESULT_ACCESS) ] = -1,
1755                [ C(RESULT_MISS)   ] = -1,
1756        },
1757 },
1758};
1759
1760EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
1761EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");
1762/* UOPS_NOT_DELIVERED.ANY */
1763EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");
1764/* ISSUE_SLOTS_NOT_CONSUMED.RECOVERY */
1765EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");
1766/* UOPS_RETIRED.ANY */
1767EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");
1768/* UOPS_ISSUED.ANY */
1769EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");
1770
1771static struct attribute *glm_events_attrs[] = {
1772        EVENT_PTR(td_total_slots_glm),
1773        EVENT_PTR(td_total_slots_scale_glm),
1774        EVENT_PTR(td_fetch_bubbles_glm),
1775        EVENT_PTR(td_recovery_bubbles_glm),
1776        EVENT_PTR(td_slots_issued_glm),
1777        EVENT_PTR(td_slots_retired_glm),
1778        NULL
1779};
1780
1781static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
1782        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
1783        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
1784        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
1785        EVENT_EXTRA_END
1786};
1787
1788#define GLM_DEMAND_DATA_RD              BIT_ULL(0)
1789#define GLM_DEMAND_RFO                  BIT_ULL(1)
1790#define GLM_ANY_RESPONSE                BIT_ULL(16)
1791#define GLM_SNP_NONE_OR_MISS            BIT_ULL(33)
1792#define GLM_DEMAND_READ                 GLM_DEMAND_DATA_RD
1793#define GLM_DEMAND_WRITE                GLM_DEMAND_RFO
1794#define GLM_DEMAND_PREFETCH             (SNB_PF_DATA_RD|SNB_PF_RFO)
1795#define GLM_LLC_ACCESS                  GLM_ANY_RESPONSE
1796#define GLM_SNP_ANY                     (GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
1797#define GLM_LLC_MISS                    (GLM_SNP_ANY|SNB_NON_DRAM)
1798
1799static __initconst const u64 glm_hw_cache_event_ids
1800                                [PERF_COUNT_HW_CACHE_MAX]
1801                                [PERF_COUNT_HW_CACHE_OP_MAX]
1802                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1803        [C(L1D)] = {
1804                [C(OP_READ)] = {
1805                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1806                        [C(RESULT_MISS)]        = 0x0,
1807                },
1808                [C(OP_WRITE)] = {
1809                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1810                        [C(RESULT_MISS)]        = 0x0,
1811                },
1812                [C(OP_PREFETCH)] = {
1813                        [C(RESULT_ACCESS)]      = 0x0,
1814                        [C(RESULT_MISS)]        = 0x0,
1815                },
1816        },
1817        [C(L1I)] = {
1818                [C(OP_READ)] = {
1819                        [C(RESULT_ACCESS)]      = 0x0380,       /* ICACHE.ACCESSES */
1820                        [C(RESULT_MISS)]        = 0x0280,       /* ICACHE.MISSES */
1821                },
1822                [C(OP_WRITE)] = {
1823                        [C(RESULT_ACCESS)]      = -1,
1824                        [C(RESULT_MISS)]        = -1,
1825                },
1826                [C(OP_PREFETCH)] = {
1827                        [C(RESULT_ACCESS)]      = 0x0,
1828                        [C(RESULT_MISS)]        = 0x0,
1829                },
1830        },
1831        [C(LL)] = {
1832                [C(OP_READ)] = {
1833                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1834                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1835                },
1836                [C(OP_WRITE)] = {
1837                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1838                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1839                },
1840                [C(OP_PREFETCH)] = {
1841                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1842                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1843                },
1844        },
1845        [C(DTLB)] = {
1846                [C(OP_READ)] = {
1847                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1848                        [C(RESULT_MISS)]        = 0x0,
1849                },
1850                [C(OP_WRITE)] = {
1851                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1852                        [C(RESULT_MISS)]        = 0x0,
1853                },
1854                [C(OP_PREFETCH)] = {
1855                        [C(RESULT_ACCESS)]      = 0x0,
1856                        [C(RESULT_MISS)]        = 0x0,
1857                },
1858        },
1859        [C(ITLB)] = {
1860                [C(OP_READ)] = {
1861                        [C(RESULT_ACCESS)]      = 0x00c0,       /* INST_RETIRED.ANY_P */
1862                        [C(RESULT_MISS)]        = 0x0481,       /* ITLB.MISS */
1863                },
1864                [C(OP_WRITE)] = {
1865                        [C(RESULT_ACCESS)]      = -1,
1866                        [C(RESULT_MISS)]        = -1,
1867                },
1868                [C(OP_PREFETCH)] = {
1869                        [C(RESULT_ACCESS)]      = -1,
1870                        [C(RESULT_MISS)]        = -1,
1871                },
1872        },
1873        [C(BPU)] = {
1874                [C(OP_READ)] = {
1875                        [C(RESULT_ACCESS)]      = 0x00c4,       /* BR_INST_RETIRED.ALL_BRANCHES */
1876                        [C(RESULT_MISS)]        = 0x00c5,       /* BR_MISP_RETIRED.ALL_BRANCHES */
1877                },
1878                [C(OP_WRITE)] = {
1879                        [C(RESULT_ACCESS)]      = -1,
1880                        [C(RESULT_MISS)]        = -1,
1881                },
1882                [C(OP_PREFETCH)] = {
1883                        [C(RESULT_ACCESS)]      = -1,
1884                        [C(RESULT_MISS)]        = -1,
1885                },
1886        },
1887};
1888
1889static __initconst const u64 glm_hw_cache_extra_regs
1890                                [PERF_COUNT_HW_CACHE_MAX]
1891                                [PERF_COUNT_HW_CACHE_OP_MAX]
1892                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1893        [C(LL)] = {
1894                [C(OP_READ)] = {
1895                        [C(RESULT_ACCESS)]      = GLM_DEMAND_READ|
1896                                                  GLM_LLC_ACCESS,
1897                        [C(RESULT_MISS)]        = GLM_DEMAND_READ|
1898                                                  GLM_LLC_MISS,
1899                },
1900                [C(OP_WRITE)] = {
1901                        [C(RESULT_ACCESS)]      = GLM_DEMAND_WRITE|
1902                                                  GLM_LLC_ACCESS,
1903                        [C(RESULT_MISS)]        = GLM_DEMAND_WRITE|
1904                                                  GLM_LLC_MISS,
1905                },
1906                [C(OP_PREFETCH)] = {
1907                        [C(RESULT_ACCESS)]      = GLM_DEMAND_PREFETCH|
1908                                                  GLM_LLC_ACCESS,
1909                        [C(RESULT_MISS)]        = GLM_DEMAND_PREFETCH|
1910                                                  GLM_LLC_MISS,
1911                },
1912        },
1913};
1914
1915static __initconst const u64 glp_hw_cache_event_ids
1916                                [PERF_COUNT_HW_CACHE_MAX]
1917                                [PERF_COUNT_HW_CACHE_OP_MAX]
1918                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1919        [C(L1D)] = {
1920                [C(OP_READ)] = {
1921                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1922                        [C(RESULT_MISS)]        = 0x0,
1923                },
1924                [C(OP_WRITE)] = {
1925                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1926                        [C(RESULT_MISS)]        = 0x0,
1927                },
1928                [C(OP_PREFETCH)] = {
1929                        [C(RESULT_ACCESS)]      = 0x0,
1930                        [C(RESULT_MISS)]        = 0x0,
1931                },
1932        },
1933        [C(L1I)] = {
1934                [C(OP_READ)] = {
1935                        [C(RESULT_ACCESS)]      = 0x0380,       /* ICACHE.ACCESSES */
1936                        [C(RESULT_MISS)]        = 0x0280,       /* ICACHE.MISSES */
1937                },
1938                [C(OP_WRITE)] = {
1939                        [C(RESULT_ACCESS)]      = -1,
1940                        [C(RESULT_MISS)]        = -1,
1941                },
1942                [C(OP_PREFETCH)] = {
1943                        [C(RESULT_ACCESS)]      = 0x0,
1944                        [C(RESULT_MISS)]        = 0x0,
1945                },
1946        },
1947        [C(LL)] = {
1948                [C(OP_READ)] = {
1949                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1950                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1951                },
1952                [C(OP_WRITE)] = {
1953                        [C(RESULT_ACCESS)]      = 0x1b7,        /* OFFCORE_RESPONSE */
1954                        [C(RESULT_MISS)]        = 0x1b7,        /* OFFCORE_RESPONSE */
1955                },
1956                [C(OP_PREFETCH)] = {
1957                        [C(RESULT_ACCESS)]      = 0x0,
1958                        [C(RESULT_MISS)]        = 0x0,
1959                },
1960        },
1961        [C(DTLB)] = {
1962                [C(OP_READ)] = {
1963                        [C(RESULT_ACCESS)]      = 0x81d0,       /* MEM_UOPS_RETIRED.ALL_LOADS */
1964                        [C(RESULT_MISS)]        = 0xe08,        /* DTLB_LOAD_MISSES.WALK_COMPLETED */
1965                },
1966                [C(OP_WRITE)] = {
1967                        [C(RESULT_ACCESS)]      = 0x82d0,       /* MEM_UOPS_RETIRED.ALL_STORES */
1968                        [C(RESULT_MISS)]        = 0xe49,        /* DTLB_STORE_MISSES.WALK_COMPLETED */
1969                },
1970                [C(OP_PREFETCH)] = {
1971                        [C(RESULT_ACCESS)]      = 0x0,
1972                        [C(RESULT_MISS)]        = 0x0,
1973                },
1974        },
1975        [C(ITLB)] = {
1976                [C(OP_READ)] = {
1977                        [C(RESULT_ACCESS)]      = 0x00c0,       /* INST_RETIRED.ANY_P */
1978                        [C(RESULT_MISS)]        = 0x0481,       /* ITLB.MISS */
1979                },
1980                [C(OP_WRITE)] = {
1981                        [C(RESULT_ACCESS)]      = -1,
1982                        [C(RESULT_MISS)]        = -1,
1983                },
1984                [C(OP_PREFETCH)] = {
1985                        [C(RESULT_ACCESS)]      = -1,
1986                        [C(RESULT_MISS)]        = -1,
1987                },
1988        },
1989        [C(BPU)] = {
1990                [C(OP_READ)] = {
1991                        [C(RESULT_ACCESS)]      = 0x00c4,       /* BR_INST_RETIRED.ALL_BRANCHES */
1992                        [C(RESULT_MISS)]        = 0x00c5,       /* BR_MISP_RETIRED.ALL_BRANCHES */
1993                },
1994                [C(OP_WRITE)] = {
1995                        [C(RESULT_ACCESS)]      = -1,
1996                        [C(RESULT_MISS)]        = -1,
1997                },
1998                [C(OP_PREFETCH)] = {
1999                        [C(RESULT_ACCESS)]      = -1,
2000                        [C(RESULT_MISS)]        = -1,
2001                },
2002        },
2003};
2004
2005static __initconst const u64 glp_hw_cache_extra_regs
2006                                [PERF_COUNT_HW_CACHE_MAX]
2007                                [PERF_COUNT_HW_CACHE_OP_MAX]
2008                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2009        [C(LL)] = {
2010                [C(OP_READ)] = {
2011                        [C(RESULT_ACCESS)]      = GLM_DEMAND_READ|
2012                                                  GLM_LLC_ACCESS,
2013                        [C(RESULT_MISS)]        = GLM_DEMAND_READ|
2014                                                  GLM_LLC_MISS,
2015                },
2016                [C(OP_WRITE)] = {
2017                        [C(RESULT_ACCESS)]      = GLM_DEMAND_WRITE|
2018                                                  GLM_LLC_ACCESS,
2019                        [C(RESULT_MISS)]        = GLM_DEMAND_WRITE|
2020                                                  GLM_LLC_MISS,
2021                },
2022                [C(OP_PREFETCH)] = {
2023                        [C(RESULT_ACCESS)]      = 0x0,
2024                        [C(RESULT_MISS)]        = 0x0,
2025                },
2026        },
2027};
2028
2029#define TNT_LOCAL_DRAM                  BIT_ULL(26)
2030#define TNT_DEMAND_READ                 GLM_DEMAND_DATA_RD
2031#define TNT_DEMAND_WRITE                GLM_DEMAND_RFO
2032#define TNT_LLC_ACCESS                  GLM_ANY_RESPONSE
2033#define TNT_SNP_ANY                     (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2034                                         SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2035#define TNT_LLC_MISS                    (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2036
2037static __initconst const u64 tnt_hw_cache_extra_regs
2038                                [PERF_COUNT_HW_CACHE_MAX]
2039                                [PERF_COUNT_HW_CACHE_OP_MAX]
2040                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2041        [C(LL)] = {
2042                [C(OP_READ)] = {
2043                        [C(RESULT_ACCESS)]      = TNT_DEMAND_READ|
2044                                                  TNT_LLC_ACCESS,
2045                        [C(RESULT_MISS)]        = TNT_DEMAND_READ|
2046                                                  TNT_LLC_MISS,
2047                },
2048                [C(OP_WRITE)] = {
2049                        [C(RESULT_ACCESS)]      = TNT_DEMAND_WRITE|
2050                                                  TNT_LLC_ACCESS,
2051                        [C(RESULT_MISS)]        = TNT_DEMAND_WRITE|
2052                                                  TNT_LLC_MISS,
2053                },
2054                [C(OP_PREFETCH)] = {
2055                        [C(RESULT_ACCESS)]      = 0x0,
2056                        [C(RESULT_MISS)]        = 0x0,
2057                },
2058        },
2059};
2060
2061EVENT_ATTR_STR(topdown-fe-bound,       td_fe_bound_tnt,        "event=0x71,umask=0x0");
2062EVENT_ATTR_STR(topdown-retiring,       td_retiring_tnt,        "event=0xc2,umask=0x0");
2063EVENT_ATTR_STR(topdown-bad-spec,       td_bad_spec_tnt,        "event=0x73,umask=0x6");
2064EVENT_ATTR_STR(topdown-be-bound,       td_be_bound_tnt,        "event=0x74,umask=0x0");
2065
2066static struct attribute *tnt_events_attrs[] = {
2067        EVENT_PTR(td_fe_bound_tnt),
2068        EVENT_PTR(td_retiring_tnt),
2069        EVENT_PTR(td_bad_spec_tnt),
2070        EVENT_PTR(td_be_bound_tnt),
2071        NULL,
2072};
2073
2074static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2075        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2076        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2077        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2078        EVENT_EXTRA_END
2079};
2080
2081static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2082        /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
2083        INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
2084        INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
2085        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
2086        EVENT_EXTRA_END
2087};
2088
2089#define KNL_OT_L2_HITE          BIT_ULL(19) /* Other Tile L2 Hit */
2090#define KNL_OT_L2_HITF          BIT_ULL(20) /* Other Tile L2 Hit */
2091#define KNL_MCDRAM_LOCAL        BIT_ULL(21)
2092#define KNL_MCDRAM_FAR          BIT_ULL(22)
2093#define KNL_DDR_LOCAL           BIT_ULL(23)
2094#define KNL_DDR_FAR             BIT_ULL(24)
2095#define KNL_DRAM_ANY            (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
2096                                    KNL_DDR_LOCAL | KNL_DDR_FAR)
2097#define KNL_L2_READ             SLM_DMND_READ
2098#define KNL_L2_WRITE            SLM_DMND_WRITE
2099#define KNL_L2_PREFETCH         SLM_DMND_PREFETCH
2100#define KNL_L2_ACCESS           SLM_LLC_ACCESS
2101#define KNL_L2_MISS             (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
2102                                   KNL_DRAM_ANY | SNB_SNP_ANY | \
2103                                                  SNB_NON_DRAM)
2104
2105static __initconst const u64 knl_hw_cache_extra_regs
2106                                [PERF_COUNT_HW_CACHE_MAX]
2107                                [PERF_COUNT_HW_CACHE_OP_MAX]
2108                                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2109        [C(LL)] = {
2110                [C(OP_READ)] = {
2111                        [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
2112                        [C(RESULT_MISS)]   = 0,
2113                },
2114                [C(OP_WRITE)] = {
2115                        [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
2116                        [C(RESULT_MISS)]   = KNL_L2_WRITE | KNL_L2_MISS,
2117                },
2118                [C(OP_PREFETCH)] = {
2119                        [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
2120                        [C(RESULT_MISS)]   = KNL_L2_PREFETCH | KNL_L2_MISS,
2121                },
2122        },
2123};
2124
2125/*
2126 * Used from PMIs where the LBRs are already disabled.
2127 *
2128 * This function may be called multiple times in a row. It must leave the
2129 * PMU in the disabled state across such consecutive calls.
2130 *
2131 * During consecutive calls, the same disable value will be written to related
2132 * registers, so the PMU state remains unchanged.
2133 *
2134 * intel_bts events don't coexist with intel PMU's BTS events because of
2135 * x86_add_exclusive(x86_lbr_exclusive_lbr); there's no need to keep them
2136 * disabled around intel PMU's event batching etc, only inside the PMI handler.
2137 *
2138 * Avoid PEBS_ENABLE MSR access in PMIs.
2139 * The GLOBAL_CTRL has already been disabled, so none of the counters count
2140 * anymore; it doesn't matter whether PEBS is enabled or not.
2141 * Usually the PEBS state is not changed in PMIs, so there is no need to
2142 * access the PEBS_ENABLE MSR in disable_all()/enable_all().
2143 * However, some cases may change the PEBS state, e.g. PMI throttling.
2144 * PEBS_ENABLE should be updated wherever the state changes.
2145 */
2146static void __intel_pmu_disable_all(void)
2147{
2148        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2149
2150        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2151
2152        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
2153                intel_pmu_disable_bts();
2154}
2155
2156static void intel_pmu_disable_all(void)
2157{
2158        __intel_pmu_disable_all();
2159        intel_pmu_pebs_disable_all();
2160        intel_pmu_lbr_disable_all();
2161}
2162
2163static void __intel_pmu_enable_all(int added, bool pmi)
2164{
2165        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2166        u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2167
2168        intel_pmu_lbr_enable_all(pmi);
2169        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
2170               intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
2171
2172        if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
2173                struct perf_event *event =
2174                        cpuc->events[INTEL_PMC_IDX_FIXED_BTS];
2175
2176                if (WARN_ON_ONCE(!event))
2177                        return;
2178
2179                intel_pmu_enable_bts(event->hw.config);
2180        }
2181}
2182
2183static void intel_pmu_enable_all(int added)
2184{
2185        intel_pmu_pebs_enable_all();
2186        __intel_pmu_enable_all(added, false);
2187}
2188
2189/*
2190 * Workaround for:
2191 *   Intel Errata AAK100 (model 26)
2192 *   Intel Errata AAP53  (model 30)
2193 *   Intel Errata BD53   (model 44)
2194 *
2195 * The official story:
2196 *   These chips need to be 'reset' when adding counters by programming the
2197 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
2198 *   in sequence on the same PMC or on different PMCs.
2199 *
2200 * In practice it appears some of these events do in fact count, and
2201 * we need to program all 4 events.
2202 */
2203static void intel_pmu_nhm_workaround(void)
2204{
2205        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2206        static const unsigned long nhm_magic[4] = {
2207                0x4300B5,
2208                0x4300D2,
2209                0x4300B1,
2210                0x4300B1
2211        };
2212        struct perf_event *event;
2213        int i;
2214
2215        /*
2216         * The errata require the following steps:
2217         * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
2218         * 2) Configure 4 PERFEVTSELx with the magic events and clear
2219         *    the corresponding PMCx;
2220         * 3) Set bits 0-3 of MSR_CORE_PERF_GLOBAL_CTRL;
2221         * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
2222         * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
2223         */
2224
2225        /*
2226         * The steps we actually take differ slightly from the above:
2227         * A) To reduce MSR operations, we skip step 1) since those MSRs
2228         *    are already cleared before this function is called;
2229         * B) Call x86_perf_event_update to save PMCx before configuring
2230         *    PERFEVTSELx with the magic numbers;
2231         * C) For step 5), we only clear a PERFEVTSELx when it is not
2232         *    currently in use;
2233         * D) Call x86_perf_event_set_period to restore PMCx.
2234         */
2235
2236        /* We always operate on 4 pairs of PERF counters */
2237        for (i = 0; i < 4; i++) {
2238                event = cpuc->events[i];
2239                if (event)
2240                        x86_perf_event_update(event);
2241        }
2242
2243        for (i = 0; i < 4; i++) {
2244                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
2245                wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
2246        }
2247
2248        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
2249        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);
2250
2251        for (i = 0; i < 4; i++) {
2252                event = cpuc->events[i];
2253
2254                if (event) {
2255                        x86_perf_event_set_period(event);
2256                        __x86_pmu_enable_event(&event->hw,
2257                                        ARCH_PERFMON_EVENTSEL_ENABLE);
2258                } else
2259                        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
2260        }
2261}
2262
2263static void intel_pmu_nhm_enable_all(int added)
2264{
2265        if (added)
2266                intel_pmu_nhm_workaround();
2267        intel_pmu_enable_all(added);
2268}
2269
2270static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
2271{
2272        u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
2273
2274        if (cpuc->tfa_shadow != val) {
2275                cpuc->tfa_shadow = val;
2276                wrmsrl(MSR_TSX_FORCE_ABORT, val);
2277        }
2278}
2279
2280static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
2281{
2282        /*
2283         * We're going to use PMC3; make sure TFA is set before we touch it.
2284         */
2285        if (cntr == 3)
2286                intel_set_tfa(cpuc, true);
2287}
2288
2289static void intel_tfa_pmu_enable_all(int added)
2290{
2291        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2292
2293        /*
2294         * If we find PMC3 is no longer used when we enable the PMU, we can
2295         * clear TFA.
2296         */
2297        if (!test_bit(3, cpuc->active_mask))
2298                intel_set_tfa(cpuc, false);
2299
2300        intel_pmu_enable_all(added);
2301}
2302
2303static inline u64 intel_pmu_get_status(void)
2304{
2305        u64 status;
2306
2307        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
2308
2309        return status;
2310}
2311
2312static inline void intel_pmu_ack_status(u64 ack)
2313{
2314        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
2315}
2316
2317static inline bool event_is_checkpointed(struct perf_event *event)
2318{
2319        return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
2320}
2321
2322static inline void intel_set_masks(struct perf_event *event, int idx)
2323{
2324        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2325
2326        if (event->attr.exclude_host)
2327                __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2328        if (event->attr.exclude_guest)
2329                __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2330        if (event_is_checkpointed(event))
2331                __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2332}
2333
2334static inline void intel_clear_masks(struct perf_event *event, int idx)
2335{
2336        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2337
2338        __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
2339        __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
2340        __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
2341}
2342
2343static void intel_pmu_disable_fixed(struct perf_event *event)
2344{
2345        struct hw_perf_event *hwc = &event->hw;
2346        u64 ctrl_val, mask;
2347        int idx = hwc->idx;
2348
2349        if (is_topdown_idx(idx)) {
2350                struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2351
2352                /*
2353                 * When there are other active TopDown events,
2354                 * don't disable the fixed counter 3.
2355                 */
2356                if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2357                        return;
2358                idx = INTEL_PMC_IDX_FIXED_SLOTS;
2359        }
2360
2361        intel_clear_masks(event, idx);
2362
2363        mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4);
2364        rdmsrl(hwc->config_base, ctrl_val);
2365        ctrl_val &= ~mask;
2366        wrmsrl(hwc->config_base, ctrl_val);
2367}
2368
2369static void intel_pmu_disable_event(struct perf_event *event)
2370{
2371        struct hw_perf_event *hwc = &event->hw;
2372        int idx = hwc->idx;
2373
2374        switch (idx) {
2375        case 0 ... INTEL_PMC_IDX_FIXED - 1:
2376                intel_clear_masks(event, idx);
2377                x86_pmu_disable_event(event);
2378                break;
2379        case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2380        case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2381                intel_pmu_disable_fixed(event);
2382                break;
2383        case INTEL_PMC_IDX_FIXED_BTS:
2384                intel_pmu_disable_bts();
2385                intel_pmu_drain_bts_buffer();
2386                return;
2387        case INTEL_PMC_IDX_FIXED_VLBR:
2388                intel_clear_masks(event, idx);
2389                break;
2390        default:
2391                intel_clear_masks(event, idx);
2392                pr_warn("Failed to disable the event with invalid index %d\n",
2393                        idx);
2394                return;
2395        }
2396
2397        /*
2398         * Needs to be called after x86_pmu_disable_event,
2399         * so we don't trigger the event without the PEBS bit set.
2400         */
2401        if (unlikely(event->attr.precise_ip))
2402                intel_pmu_pebs_disable(event);
2403}
2404
2405static void intel_pmu_del_event(struct perf_event *event)
2406{
2407        if (needs_branch_stack(event))
2408                intel_pmu_lbr_del(event);
2409        if (event->attr.precise_ip)
2410                intel_pmu_pebs_del(event);
2411}
2412
2413static int icl_set_topdown_event_period(struct perf_event *event)
2414{
2415        struct hw_perf_event *hwc = &event->hw;
2416        s64 left = local64_read(&hwc->period_left);
2417
2418        /*
2419         * The values in PERF_METRICS MSR are derived from fixed counter 3.
2420         * Software should start both registers, PERF_METRICS and fixed
2421         * counter 3, from zero.
2422         * Clear PERF_METRICS and fixed counter 3 at initialization.
2423         * After that, both MSRs are cleared on each read, so there is
2424         * no need to clear them again here.
2425         */
2426        if (left == x86_pmu.max_period) {
2427                wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2428                wrmsrl(MSR_PERF_METRICS, 0);
2429                hwc->saved_slots = 0;
2430                hwc->saved_metric = 0;
2431        }
2432
2433        if ((hwc->saved_slots) && is_slots_event(event)) {
2434                wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
2435                wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
2436        }
2437
2438        perf_event_update_userpage(event);
2439
2440        return 0;
2441}
2442
2443static int adl_set_topdown_event_period(struct perf_event *event)
2444{
2445        struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2446
2447        if (pmu->cpu_type != hybrid_big)
2448                return 0;
2449
2450        return icl_set_topdown_event_period(event);
2451}
2452
2453static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
2454{
2455        u32 val;
2456
2457        /*
2458         * The metric is reported as an 8-bit integer fraction;
2459         * the fractions sum up to 0xff.
2460         * slots-in-metric = (Metric / 0xff) * slots
2461         */
2462        val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
2463        return  mul_u64_u32_div(slots, val, 0xff);
2464}
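/*
 * Worked example (illustrative, not from the original sources): with
 * slots = 1000 and a metric byte of 0x80 for this index, the helper above
 * computes mul_u64_u32_div(1000, 0x80, 0xff) = 128000 / 255 = 501, i.e.
 * roughly half of the slots are attributed to this metric. A minimal
 * sketch of the same (Metric / 0xff) * slots arithmetic, assuming plain
 * 64-bit math suffices:
 *
 *	static inline u64 metric_slots(u64 slots, u8 frac)
 *	{
 *		return (slots * frac) / 0xff;
 *	}
 */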
2465
2466static u64 icl_get_topdown_value(struct perf_event *event,
2467                                       u64 slots, u64 metrics)
2468{
2469        int idx = event->hw.idx;
2470        u64 delta;
2471
2472        if (is_metric_idx(idx))
2473                delta = icl_get_metrics_event_value(metrics, slots, idx);
2474        else
2475                delta = slots;
2476
2477        return delta;
2478}
2479
2480static void __icl_update_topdown_event(struct perf_event *event,
2481                                       u64 slots, u64 metrics,
2482                                       u64 last_slots, u64 last_metrics)
2483{
2484        u64 delta, last = 0;
2485
2486        delta = icl_get_topdown_value(event, slots, metrics);
2487        if (last_slots)
2488                last = icl_get_topdown_value(event, last_slots, last_metrics);
2489
2490        /*
2491         * The 8-bit integer fraction of the metric may not be accurate,
2492         * especially when the change is very small.
2493         * For example, if only a few bad_spec events happen, the fraction
2494         * may drop from 1 to 0. If so, the bad_spec event value
2495         * will be 0, which is definitely less than the last value.
2496         * Avoid updating event->count in this case.
2497         */
2498        if (delta > last) {
2499                delta -= last;
2500                local64_add(delta, &event->count);
2501        }
2502}
2503
2504static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
2505                                      u64 metrics, int metric_end)
2506{
2507        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2508        struct perf_event *other;
2509        int idx;
2510
2511        event->hw.saved_slots = slots;
2512        event->hw.saved_metric = metrics;
2513
2514        for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2515                if (!is_topdown_idx(idx))
2516                        continue;
2517                other = cpuc->events[idx];
2518                other->hw.saved_slots = slots;
2519                other->hw.saved_metric = metrics;
2520        }
2521}
2522
2523/*
2524 * Update all active Topdown events.
2525 *
2526 * The PERF_METRICS and fixed counter 3 are read separately. The values may be
2527 * modified by an NMI. The PMU has to be disabled before calling this function.
2528 */
2529
2530static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
2531{
2532        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2533        struct perf_event *other;
2534        u64 slots, metrics;
2535        bool reset = true;
2536        int idx;
2537
2538        /* read Fixed counter 3 */
2539        rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
2540        if (!slots)
2541                return 0;
2542
2543        /* read PERF_METRICS */
2544        rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);
2545
2546        for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
2547                if (!is_topdown_idx(idx))
2548                        continue;
2549                other = cpuc->events[idx];
2550                __icl_update_topdown_event(other, slots, metrics,
2551                                           event ? event->hw.saved_slots : 0,
2552                                           event ? event->hw.saved_metric : 0);
2553        }
2554
2555        /*
2556         * Check and update this event, which may have been cleared
2557         * from active_mask, e.g. by x86_pmu_stop().
2558         */
2559        if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
2560                __icl_update_topdown_event(event, slots, metrics,
2561                                           event->hw.saved_slots,
2562                                           event->hw.saved_metric);
2563
2564                /*
2565                 * x86_pmu_stop() clears the event from active_mask first and
2566                 * then drains the delta, which indicates a context switch for
2567                 * counting.
2568                 * Save the metric and slots values for the context switch.
2569                 * There is no need to reset PERF_METRICS and fixed counter 3,
2570                 * because the values will be restored on the next schedule in.
2571                 */
2572                update_saved_topdown_regs(event, slots, metrics, metric_end);
2573                reset = false;
2574        }
2575
2576        if (reset) {
2577                /* The fixed counter 3 has to be written before the PERF_METRICS. */
2578                wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
2579                wrmsrl(MSR_PERF_METRICS, 0);
2580                if (event)
2581                        update_saved_topdown_regs(event, 0, 0, metric_end);
2582        }
2583
2584        return slots;
2585}
2586
2587static u64 icl_update_topdown_event(struct perf_event *event)
2588{
2589        return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
2590                                                 x86_pmu.num_topdown_events - 1);
2591}
2592
2593static u64 adl_update_topdown_event(struct perf_event *event)
2594{
2595        struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
2596
2597        if (pmu->cpu_type != hybrid_big)
2598                return 0;
2599
2600        return icl_update_topdown_event(event);
2601}
2602
2603
2604static void intel_pmu_read_topdown_event(struct perf_event *event)
2605{
2606        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2607
2608        /* Only need to call update_topdown_event() once for group read. */
2609        if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
2610            !is_slots_event(event))
2611                return;
2612
2613        perf_pmu_disable(event->pmu);
2614        x86_pmu.update_topdown_event(event);
2615        perf_pmu_enable(event->pmu);
2616}
2617
2618static void intel_pmu_read_event(struct perf_event *event)
2619{
2620        if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
2621                intel_pmu_auto_reload_read(event);
2622        else if (is_topdown_count(event) && x86_pmu.update_topdown_event)
2623                intel_pmu_read_topdown_event(event);
2624        else
2625                x86_perf_event_update(event);
2626}
2627
2628static void intel_pmu_enable_fixed(struct perf_event *event)
2629{
2630        struct hw_perf_event *hwc = &event->hw;
2631        u64 ctrl_val, mask, bits = 0;
2632        int idx = hwc->idx;
2633
2634        if (is_topdown_idx(idx)) {
2635                struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2636                /*
2637                 * When there are other active TopDown events,
2638                 * don't enable the fixed counter 3 again.
2639                 */
2640                if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
2641                        return;
2642
2643                idx = INTEL_PMC_IDX_FIXED_SLOTS;
2644        }
2645
2646        intel_set_masks(event, idx);
2647
2648        /*
2649         * Enable IRQ generation (0x8), if not PEBS,
2650         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
2651         * if requested:
2652         */
2653        if (!event->attr.precise_ip)
2654                bits |= 0x8;
2655        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
2656                bits |= 0x2;
2657        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
2658                bits |= 0x1;
2659
2660        /*
2661         * ANY bit is supported in v3 and up
2662         */
2663        if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
2664                bits |= 0x4;
2665
2666        idx -= INTEL_PMC_IDX_FIXED;
2667        bits <<= (idx * 4);
2668        mask = 0xfULL << (idx * 4);
2669
2670        if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
2671                bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2672                mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
2673        }
2674
2675        rdmsrl(hwc->config_base, ctrl_val);
2676        ctrl_val &= ~mask;
2677        ctrl_val |= bits;
2678        wrmsrl(hwc->config_base, ctrl_val);
2679}
2680
2681static void intel_pmu_enable_event(struct perf_event *event)
2682{
2683        struct hw_perf_event *hwc = &event->hw;
2684        int idx = hwc->idx;
2685
2686        if (unlikely(event->attr.precise_ip))
2687                intel_pmu_pebs_enable(event);
2688
2689        switch (idx) {
2690        case 0 ... INTEL_PMC_IDX_FIXED - 1:
2691                intel_set_masks(event, idx);
2692                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2693                break;
2694        case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
2695        case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
2696                intel_pmu_enable_fixed(event);
2697                break;
2698        case INTEL_PMC_IDX_FIXED_BTS:
2699                if (!__this_cpu_read(cpu_hw_events.enabled))
2700                        return;
2701                intel_pmu_enable_bts(hwc->config);
2702                break;
2703        case INTEL_PMC_IDX_FIXED_VLBR:
2704                intel_set_masks(event, idx);
2705                break;
2706        default:
2707                pr_warn("Failed to enable the event with invalid index %d\n",
2708                        idx);
2709        }
2710}
2711
2712static void intel_pmu_add_event(struct perf_event *event)
2713{
2714        if (event->attr.precise_ip)
2715                intel_pmu_pebs_add(event);
2716        if (needs_branch_stack(event))
2717                intel_pmu_lbr_add(event);
2718}
2719
2720/*
2721 * Save and restart an expired event. Called by NMI contexts,
2722 * so it has to be careful about preempting normal event ops:
2723 */
2724int intel_pmu_save_and_restart(struct perf_event *event)
2725{
2726        x86_perf_event_update(event);
2727        /*
2728         * For a checkpointed counter always reset back to 0.  This
2729         * avoids a situation where the counter overflows, aborts the
2730         * transaction and is then set back to shortly before the
2731         * overflow, and overflows and aborts again.
2732         */
2733        if (unlikely(event_is_checkpointed(event))) {
2734                /* No race with NMIs because the counter should not be armed */
2735                wrmsrl(event->hw.event_base, 0);
2736                local64_set(&event->hw.prev_count, 0);
2737        }
2738        return x86_perf_event_set_period(event);
2739}
2740
2741static void intel_pmu_reset(void)
2742{
2743        struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
2744        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2745        int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
2746        int num_counters = hybrid(cpuc->pmu, num_counters);
2747        unsigned long flags;
2748        int idx;
2749
2750        if (!num_counters)
2751                return;
2752
2753        local_irq_save(flags);
2754
2755        pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
2756
2757        for (idx = 0; idx < num_counters; idx++) {
2758                wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
2759                wrmsrl_safe(x86_pmu_event_addr(idx),  0ull);
2760        }
2761        for (idx = 0; idx < num_counters_fixed; idx++) {
2762                if (fixed_counter_disabled(idx, cpuc->pmu))
2763                        continue;
2764                wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2765        }
2766
2767        if (ds)
2768                ds->bts_index = ds->bts_buffer_base;
2769
2770        /* Ack all overflows and clear GLOBAL_CTRL to disable all counters */
2771        if (x86_pmu.version >= 2) {
2772                intel_pmu_ack_status(intel_pmu_get_status());
2773                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
2774        }
2775
2776        /* Reset LBRs and LBR freezing */
2777        if (x86_pmu.lbr_nr) {
2778                update_debugctlmsr(get_debugctlmsr() &
2779                        ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
2780        }
2781
2782        local_irq_restore(flags);
2783}
2784
2785static int handle_pmi_common(struct pt_regs *regs, u64 status)
2786{
2787        struct perf_sample_data data;
2788        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2789        int bit;
2790        int handled = 0;
2791        u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
2792
2793        inc_irq_stat(apic_perf_irqs);
2794
2795        /*
2796         * Ignore a range of extra bits in status that do not indicate
2797         * overflow by themselves.
2798         */
2799        status &= ~(GLOBAL_STATUS_COND_CHG |
2800                    GLOBAL_STATUS_ASIF |
2801                    GLOBAL_STATUS_LBRS_FROZEN);
2802        if (!status)
2803                return 0;
2804        /*
2805         * In case multiple PEBS events are sampled at the same time,
2806         * it is possible to have GLOBAL_STATUS bit 62 set indicating
2807         * PEBS buffer overflow and also seeing at most 3 PEBS counters
2808         * having their bits set in the status register. This is a sign
2809         * that there was at least one PEBS record pending at the time
2810         * of the PMU interrupt. PEBS counters must only be processed
2811         * via the drain_pebs() calls and not via the regular sample
2812         * processing loop that follows; otherwise phony regular samples,
2813         * not marked with the EXACT tag, may be generated in the sampling
2814         * buffer. Another possibility is to have one PEBS event and at
2815         * least one non-PEBS event that overflows while PEBS is armed.
2816         * In this case, bit 62 of GLOBAL_STATUS will not be set, yet the
2817         * overflow status bit for the PEBS counter will be, at least on
2818         * Skylake.
2819         *
2820         * To avoid this problem, we systematically ignore the PEBS-enabled
2821         * counters from the GLOBAL_STATUS mask and we always process PEBS
2822         * events via drain_pebs().
2823         */
2824        if (x86_pmu.flags & PMU_FL_PEBS_ALL)
2825                status &= ~cpuc->pebs_enabled;
2826        else
2827                status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
2828
2829        /*
2830         * PEBS overflow sets bit 62 in the global status register
2831         */
2832        if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
2833                u64 pebs_enabled = cpuc->pebs_enabled;
2834
2835                handled++;
2836                x86_pmu.drain_pebs(regs, &data);
2837                status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
2838
2839                /*
2840                 * The PMI throttle may be triggered, which stops the PEBS event.
2841                 * Although cpuc->pebs_enabled is updated accordingly,
2842                 * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has been
2843                 * forced to 0 in the PMI.
2844                 * Update the MSR if pebs_enabled has changed.
2845                 */
2846                if (pebs_enabled != cpuc->pebs_enabled)
2847                        wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
2848        }
2849
2850        /*
2851         * Intel PT
2852         */
2853        if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
2854                handled++;
2855                if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
2856                        perf_guest_cbs->handle_intel_pt_intr))
2857                        perf_guest_cbs->handle_intel_pt_intr();
2858                else
2859                        intel_pt_interrupt();
2860        }
2861
2862        /*
2863         * Intel Perf metrics
2864         */
2865        if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
2866                handled++;
2867                if (x86_pmu.update_topdown_event)
2868                        x86_pmu.update_topdown_event(NULL);
2869        }
2870
2871        /*
2872         * Checkpointed counters can lead to 'spurious' PMIs because the
2873         * rollback caused by the PMI will have cleared the overflow status
2874         * bit. Therefore always force probe these counters.
2875         */
2876        status |= cpuc->intel_cp_status;
2877
2878        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2879                struct perf_event *event = cpuc->events[bit];
2880
2881                handled++;
2882
2883                if (!test_bit(bit, cpuc->active_mask))
2884                        continue;
2885
2886                if (!intel_pmu_save_and_restart(event))
2887                        continue;
2888
2889                perf_sample_data_init(&data, 0, event->hw.last_period);
2890
2891                if (has_branch_stack(event))
2892                        data.br_stack = &cpuc->lbr_stack;
2893
2894                if (perf_event_overflow(event, &data, regs))
2895                        x86_pmu_stop(event, 0);
2896        }
2897
2898        return handled;
2899}
2900
2901/*
2902 * This handler is triggered by the local APIC, so the APIC IRQ handling
2903 * rules apply:
2904 */
2905static int intel_pmu_handle_irq(struct pt_regs *regs)
2906{
2907        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
2908        bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
2909        bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
2910        int loops;
2911        u64 status;
2912        int handled;
2913        int pmu_enabled;
2914
2915        /*
2916         * Save the PMU state.
2917         * It needs to be restored when leaving the handler.
2918         */
2919        pmu_enabled = cpuc->enabled;
2920        /*
2921         * In general, the early ACK is only used on old platforms.
2922         * For big cores starting with Haswell, the late ACK should be
2923         * used.
2924         * For small cores after Tremont, the ACK has to be done right
2925         * before re-enabling the counters, i.e. in the middle of the
2926         * NMI handler.
2927         */
2928        if (!late_ack && !mid_ack)
2929                apic_write(APIC_LVTPC, APIC_DM_NMI);
2930        intel_bts_disable_local();
2931        cpuc->enabled = 0;
2932        __intel_pmu_disable_all();
2933        handled = intel_pmu_drain_bts_buffer();
2934        handled += intel_bts_interrupt();
2935        status = intel_pmu_get_status();
2936        if (!status)
2937                goto done;
2938
2939        loops = 0;
2940again:
2941        intel_pmu_lbr_read();
2942        intel_pmu_ack_status(status);
2943        if (++loops > 100) {
2944                static bool warned;
2945
2946                if (!warned) {
2947                        WARN(1, "perfevents: irq loop stuck!\n");
2948                        perf_event_print_debug();
2949                        warned = true;
2950                }
2951                intel_pmu_reset();
2952                goto done;
2953        }
2954
2955        handled += handle_pmi_common(regs, status);
2956
2957        /*
2958         * Repeat if there is more work to be done:
2959         */
2960        status = intel_pmu_get_status();
2961        if (status)
2962                goto again;
2963
2964done:
2965        if (mid_ack)
2966                apic_write(APIC_LVTPC, APIC_DM_NMI);
2967        /* Only restore PMU state when it's active. See x86_pmu_disable(). */
2968        cpuc->enabled = pmu_enabled;
2969        if (pmu_enabled)
2970                __intel_pmu_enable_all(0, true);
2971        intel_bts_enable_local();
2972
2973        /*
2974         * Only unmask the NMI after the overflow counters
2975         * have been reset. This avoids spurious NMIs on
2976         * Haswell CPUs.
2977         */
2978        if (late_ack)
2979                apic_write(APIC_LVTPC, APIC_DM_NMI);
2980        return handled;
2981}
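/*
 * The returned count feeds the NMI handler registered by the generic
 * x86 perf code; a non-zero value marks the NMI as consumed here. The
 * 100-iteration limit above is a safety net: if GLOBAL_STATUS never
 * clears, the PMU state is assumed wedged and intel_pmu_reset() wipes
 * it rather than looping forever in NMI context.
 */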
2982
2983static struct event_constraint *
2984intel_bts_constraints(struct perf_event *event)
2985{
2986        if (unlikely(intel_pmu_has_bts(event)))
2987                return &bts_constraint;
2988
2989        return NULL;
2990}
2991
2992/*
2993 * Note: matches a fake event, like Fixed2.
2994 */
2995static struct event_constraint *
2996intel_vlbr_constraints(struct perf_event *event)
2997{
2998        struct event_constraint *c = &vlbr_constraint;
2999
3000        if (unlikely(constraint_match(c, event->hw.config)))
3001                return c;
3002
3003        return NULL;
3004}
3005
3006static int intel_alt_er(struct cpu_hw_events *cpuc,
3007                        int idx, u64 config)
3008{
3009        struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
3010        int alt_idx = idx;
3011
3012        if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
3013                return idx;
3014
3015        if (idx == EXTRA_REG_RSP_0)
3016                alt_idx = EXTRA_REG_RSP_1;
3017
3018        if (idx == EXTRA_REG_RSP_1)
3019                alt_idx = EXTRA_REG_RSP_0;
3020
3021        if (config & ~extra_regs[alt_idx].valid_mask)
3022                return idx;
3023
3024        return alt_idx;
3025}
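/*
 * Illustrative example (the extra_regs tables live elsewhere in this
 * file): on parts with PMU_FL_HAS_RSP_1, two offcore-response events
 * that both want EXTRA_REG_RSP_0 can still be scheduled together;
 * intel_alt_er() retargets the second one to EXTRA_REG_RSP_1 and
 * intel_fixup_er() below rewrites its event code and MSR accordingly,
 * provided the config fits the alternate register's valid_mask.
 */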
3026
3027static void intel_fixup_er(struct perf_event *event, int idx)
3028{
3029        struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
3030        event->hw.extra_reg.idx = idx;
3031
3032        if (idx == EXTRA_REG_RSP_0) {
3033                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3034                event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
3035                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
3036        } else if (idx == EXTRA_REG_RSP_1) {
3037                event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
3038                event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
3039                event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
3040        }
3041}
3042
3043/*
3044 * manage allocation of shared extra msr for certain events
3045 *
3046 * sharing can be:
3047 * per-cpu: to be shared between the various events on a single PMU
3048 * per-core: per-cpu + shared by HT threads
3049 */
3050static struct event_constraint *
3051__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
3052                                   struct perf_event *event,
3053                                   struct hw_perf_event_extra *reg)
3054{
3055        struct event_constraint *c = &emptyconstraint;
3056        struct er_account *era;
3057        unsigned long flags;
3058        int idx = reg->idx;
3059
3060        /*
3061         * reg->alloc can be set due to existing state, so for fake cpuc we
3062         * need to ignore this, otherwise we might fail to allocate proper fake
3063         * state for this extra reg constraint. Also see the comment below.
3064         */
3065        if (reg->alloc && !cpuc->is_fake)
3066                return NULL; /* call x86_get_event_constraint() */
3067
3068again:
3069        era = &cpuc->shared_regs->regs[idx];
3070        /*
3071         * we use spin_lock_irqsave() to avoid lockdep issues when
3072         * passing a fake cpuc
3073         */
3074        raw_spin_lock_irqsave(&era->lock, flags);
3075
3076        if (!atomic_read(&era->ref) || era->config == reg->config) {
3077
3078                /*
3079                 * If it's a fake cpuc -- as per validate_{group,event}() we
3080                 * shouldn't touch event state and we can avoid doing so
3081                 * since both will only call get_event_constraints() once
3082                 * on each event, this avoids the need for reg->alloc.
3083                 *
3084                 * Not doing the ER fixup will only result in era->reg being
3085                 * wrong, but since we won't actually try and program hardware
3086                 * this isn't a problem either.
3087                 */
3088                if (!cpuc->is_fake) {
3089                        if (idx != reg->idx)
3090                                intel_fixup_er(event, idx);
3091
3092                        /*
3093                         * x86_schedule_events() can call get_event_constraints()
3094                         * multiple times on events in the case of incremental
3095                         * scheduling. reg->alloc ensures we only do the ER
3096                         * allocation once.
3097                         */
3098                        reg->alloc = 1;
3099                }
3100
3101                /* lock in msr value */
3102                era->config = reg->config;
3103                era->reg = reg->reg;
3104
3105                /* one more user */
3106                atomic_inc(&era->ref);
3107
3108                /*
3109                 * need to call x86_get_event_constraint()
3110                 * to check if associated event has constraints
3111                 */
3112                c = NULL;
3113        } else {
3114                idx = intel_alt_er(cpuc, idx, reg->config);
3115                if (idx != reg->idx) {
3116                        raw_spin_unlock_irqrestore(&era->lock, flags);
3117                        goto again;
3118                }
3119        }
3120        raw_spin_unlock_irqrestore(&era->lock, flags);
3121
3122        return c;
3123}
3124
3125static void
3126__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
3127                                   struct hw_perf_event_extra *reg)
3128{
3129        struct er_account *era;
3130
3131        /*
3132         * Only put constraint if extra reg was actually allocated. Also takes
3133         * care of events which do not use an extra shared reg.
3134         *
3135         * Also, if this is a fake cpuc we shouldn't touch any event state
3136         * (reg->alloc) and we don't care about leaving inconsistent cpuc state
3137         * either since it'll be thrown out.
3138         */
3139        if (!reg->alloc || cpuc->is_fake)
3140                return;
3141
3142        era = &cpuc->shared_regs->regs[reg->idx];
3143
3144        /* one fewer user */
3145        atomic_dec(&era->ref);
3146
3147        /* allocate again next time */
3148        reg->alloc = 0;
3149}
3150
3151static struct event_constraint *
3152intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
3153                              struct perf_event *event)
3154{
3155        struct event_constraint *c = NULL, *d;
3156        struct hw_perf_event_extra *xreg, *breg;
3157
3158        xreg = &event->hw.extra_reg;
3159        if (xreg->idx != EXTRA_REG_NONE) {
3160                c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
3161                if (c == &emptyconstraint)
3162                        return c;
3163        }
3164        breg = &event->hw.branch_reg;
3165        if (breg->idx != EXTRA_REG_NONE) {
3166                d = __intel_shared_reg_get_constraints(cpuc, event, breg);
3167                if (d == &emptyconstraint) {
3168                        __intel_shared_reg_put_constraints(cpuc, xreg);
3169                        c = d;
3170                }
3171        }
3172        return c;
3173}
3174
3175struct event_constraint *
3176x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3177                          struct perf_event *event)
3178{
3179        struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
3180        struct event_constraint *c;
3181
3182        if (event_constraints) {
3183                for_each_event_constraint(c, event_constraints) {
3184                        if (constraint_match(c, event->hw.config)) {
3185                                event->hw.flags |= c->flags;
3186                                return c;
3187                        }
3188                }
3189        }
3190
3191        return &hybrid_var(cpuc->pmu, unconstrained);
3192}
3193
3194static struct event_constraint *
3195__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3196                            struct perf_event *event)
3197{
3198        struct event_constraint *c;
3199
3200        c = intel_vlbr_constraints(event);
3201        if (c)
3202                return c;
3203
3204        c = intel_bts_constraints(event);
3205        if (c)
3206                return c;
3207
3208        c = intel_shared_regs_constraints(cpuc, event);
3209        if (c)
3210                return c;
3211
3212        c = intel_pebs_constraints(event);
3213        if (c)
3214                return c;
3215
3216        return x86_get_event_constraints(cpuc, idx, event);
3217}
3218
3219static void
3220intel_start_scheduling(struct cpu_hw_events *cpuc)
3221{
3222        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3223        struct intel_excl_states *xl;
3224        int tid = cpuc->excl_thread_id;
3225
3226        /*
3227         * nothing needed if in group validation mode
3228         */
3229        if (cpuc->is_fake || !is_ht_workaround_enabled())
3230                return;
3231
3232        /*
3233         * no exclusion needed
3234         */
3235        if (WARN_ON_ONCE(!excl_cntrs))
3236                return;
3237
3238        xl = &excl_cntrs->states[tid];
3239
3240        xl->sched_started = true;
3241        /*
3242         * lock shared state until we are done scheduling
3243         * in stop_event_scheduling()
3244         * makes scheduling appear as a transaction
3245         */
3246        raw_spin_lock(&excl_cntrs->lock);
3247}
3248
3249static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
3250{
3251        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3252        struct event_constraint *c = cpuc->event_constraint[idx];
3253        struct intel_excl_states *xl;
3254        int tid = cpuc->excl_thread_id;
3255
3256        if (cpuc->is_fake || !is_ht_workaround_enabled())
3257                return;
3258
3259        if (WARN_ON_ONCE(!excl_cntrs))
3260                return;
3261
3262        if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
3263                return;
3264
3265        xl = &excl_cntrs->states[tid];
3266
3267        lockdep_assert_held(&excl_cntrs->lock);
3268
3269        if (c->flags & PERF_X86_EVENT_EXCL)
3270                xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
3271        else
3272                xl->state[cntr] = INTEL_EXCL_SHARED;
3273}
3274
3275static void
3276intel_stop_scheduling(struct cpu_hw_events *cpuc)
3277{
3278        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3279        struct intel_excl_states *xl;
3280        int tid = cpuc->excl_thread_id;
3281
3282        /*
3283         * nothing needed if in group validation mode
3284         */
3285        if (cpuc->is_fake || !is_ht_workaround_enabled())
3286                return;
3287        /*
3288         * no exclusion needed
3289         */
3290        if (WARN_ON_ONCE(!excl_cntrs))
3291                return;
3292
3293        xl = &excl_cntrs->states[tid];
3294
3295        xl->sched_started = false;
3296        /*
3297         * release shared state lock (acquired in intel_start_scheduling())
3298         */
3299        raw_spin_unlock(&excl_cntrs->lock);
3300}
3301
3302static struct event_constraint *
3303dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
3304{
3305        WARN_ON_ONCE(!cpuc->constraint_list);
3306
3307        if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
3308                struct event_constraint *cx;
3309
3310                /*
3311                 * grab pre-allocated constraint entry
3312                 */
3313                cx = &cpuc->constraint_list[idx];
3314
3315                /*
3316                 * initialize dynamic constraint
3317                 * with static constraint
3318                 */
3319                *cx = *c;
3320
3321                /*
3322                 * mark constraint as dynamic
3323                 */
3324                cx->flags |= PERF_X86_EVENT_DYNAMIC;
3325                c = cx;
3326        }
3327
3328        return c;
3329}
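/*
 * The static constraint tables are shared (and const), so callers such
 * as intel_get_excl_constraints() and tfa_get_event_constraints() that
 * need to clear counter bits must first copy the constraint into the
 * per-CPU constraint_list via dyn_constraint() above.
 */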
3330
3331static struct event_constraint *
3332intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
3333                           int idx, struct event_constraint *c)
3334{
3335        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3336        struct intel_excl_states *xlo;
3337        int tid = cpuc->excl_thread_id;
3338        int is_excl, i, w;
3339
3340        /*
3341         * validating a group does not require
3342         * enforcing cross-thread  exclusion
3343         */
3344        if (cpuc->is_fake || !is_ht_workaround_enabled())
3345                return c;
3346
3347        /*
3348         * no exclusion needed
3349         */
3350        if (WARN_ON_ONCE(!excl_cntrs))
3351                return c;
3352
3353        /*
3354         * because we modify the constraint, we need
3355         * to make a copy. Static constraints come
3356         * from static const tables.
3357         *
3358         * only needed when constraint has not yet
3359         * been cloned (marked dynamic)
3360         */
3361        c = dyn_constraint(cpuc, c, idx);
3362
3363        /*
3364         * From here on, the constraint is dynamic.
3365         * Either it was just allocated above, or it
3366         * was allocated during an earlier invocation
3367         * of this function.
3368         */
3369
3370        /*
3371         * state of sibling HT
3372         */
3373        xlo = &excl_cntrs->states[tid ^ 1];
3374
3375        /*
3376         * event requires exclusive counter access
3377         * across HT threads
3378         */
3379        is_excl = c->flags & PERF_X86_EVENT_EXCL;
3380        if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
3381                event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
3382                if (!cpuc->n_excl++)
3383                        WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
3384        }
3385
3386        /*
3387         * Modify static constraint with current dynamic
3388         * state of thread
3389         *
3390         * EXCLUSIVE: sibling counter measuring exclusive event
3391         * SHARED   : sibling counter measuring non-exclusive event
3392         * UNUSED   : sibling counter unused
3393         */
3394        w = c->weight;
3395        for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
3396                /*
3397                 * exclusive event in sibling counter
3398                 * our corresponding counter cannot be used
3399                 * regardless of our event
3400                 */
3401                if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
3402                        __clear_bit(i, c->idxmsk);
3403                        w--;
3404                        continue;
3405                }
3406                /*
3407                 * if measuring an exclusive event, sibling
3408                 * measuring non-exclusive, then counter cannot
3409                 * be used
3410                 */
3411                if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
3412                        __clear_bit(i, c->idxmsk);
3413                        w--;
3414                        continue;
3415                }
3416        }
3417
3418        /*
3419         * if we return an empty mask, then switch
3420         * back to static empty constraint to avoid
3421         * the cost of freeing later on
3422         */
3423        if (!w)
3424                c = &emptyconstraint;
3425
3426        c->weight = w;
3427
3428        return c;
3429}
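/*
 * Worked example (hypothetical sibling state): with a constraint
 * allowing counters {0,1,2,3} (weight 4) and the sibling thread
 * holding counter 2 in INTEL_EXCL_EXCLUSIVE state, bit 2 is cleared
 * and the weight drops to 3. If every counter is pruned this way the
 * static empty constraint is returned instead of the dynamic copy.
 */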
3430
3431static struct event_constraint *
3432intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3433                            struct perf_event *event)
3434{
3435        struct event_constraint *c1, *c2;
3436
3437        c1 = cpuc->event_constraint[idx];
3438
3439        /*
3440         * first time only
3441         * - static constraint: no change across incremental scheduling calls
3442         * - dynamic constraint: handled by intel_get_excl_constraints()
3443         */
3444        c2 = __intel_get_event_constraints(cpuc, idx, event);
3445        if (c1) {
3446                WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
3447                bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
3448                c1->weight = c2->weight;
3449                c2 = c1;
3450        }
3451
3452        if (cpuc->excl_cntrs)
3453                return intel_get_excl_constraints(cpuc, event, idx, c2);
3454
3455        return c2;
3456}
3457
3458static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
3459                struct perf_event *event)
3460{
3461        struct hw_perf_event *hwc = &event->hw;
3462        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
3463        int tid = cpuc->excl_thread_id;
3464        struct intel_excl_states *xl;
3465
3466        /*
3467         * nothing needed if in group validation mode
3468         */
3469        if (cpuc->is_fake)
3470                return;
3471
3472        if (WARN_ON_ONCE(!excl_cntrs))
3473                return;
3474
3475        if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
3476                hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
3477                if (!--cpuc->n_excl)
3478                        WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
3479        }
3480
3481        /*
3482         * If event was actually assigned, then mark the counter state as
3483         * unused now.
3484         */
3485        if (hwc->idx >= 0) {
3486                xl = &excl_cntrs->states[tid];
3487
3488                /*
3489                 * put_constraint may be called from x86_schedule_events()
3490                 * which already has the lock held so here make locking
3491                 * conditional.
3492                 */
3493                if (!xl->sched_started)
3494                        raw_spin_lock(&excl_cntrs->lock);
3495
3496                xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
3497
3498                if (!xl->sched_started)
3499                        raw_spin_unlock(&excl_cntrs->lock);
3500        }
3501}
3502
3503static void
3504intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
3505                                        struct perf_event *event)
3506{
3507        struct hw_perf_event_extra *reg;
3508
3509        reg = &event->hw.extra_reg;
3510        if (reg->idx != EXTRA_REG_NONE)
3511                __intel_shared_reg_put_constraints(cpuc, reg);
3512
3513        reg = &event->hw.branch_reg;
3514        if (reg->idx != EXTRA_REG_NONE)
3515                __intel_shared_reg_put_constraints(cpuc, reg);
3516}
3517
3518static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
3519                                        struct perf_event *event)
3520{
3521        intel_put_shared_regs_event_constraints(cpuc, event);
3522
3523        /*
3524         * If the PMU has exclusive counter restrictions, then
3525         * all events are subject to them and must call the
3526         * put_excl_constraints() routine.
3527         */
3528        if (cpuc->excl_cntrs)
3529                intel_put_excl_constraints(cpuc, event);
3530}
3531
3532static void intel_pebs_aliases_core2(struct perf_event *event)
3533{
3534        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3535                /*
3536                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3537                 * (0x003c) so that we can use it with PEBS.
3538                 *
3539                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3540                 * PEBS capable. However we can use INST_RETIRED.ANY_P
3541                 * (0x00c0), which is a PEBS capable event, to get the same
3542                 * count.
3543                 *
3544                 * INST_RETIRED.ANY_P counts the number of cycles that retire
3545                 * at least CNTMASK instructions. By setting CNTMASK to a value
3546                 * (16) larger than the maximum number of instructions that can
3547                 * be retired per cycle (4) and then inverting the condition, we
3548                 * count all cycles that retire fewer than 16 instructions, which
3549                 * is every cycle.
3550                 *
3551                 * Thereby we gain a PEBS capable cycle counter.
3552                 */
3553                u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
3554
3555                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3556                event->hw.config = alt_config;
3557        }
3558}
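/*
 * For reference, the alternative encoding above corresponds to raw
 * config 0x108000c0 (event=0xc0, inv=1, cmask=16), using the bit
 * positions advertised by the format attributes later in this file
 * (event: config:0-7, inv: config:23, cmask: config:24-31).
 */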
3559
3560static void intel_pebs_aliases_snb(struct perf_event *event)
3561{
3562        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3563                /*
3564                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3565                 * (0x003c) so that we can use it with PEBS.
3566                 *
3567                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3568                 * PEBS capable. However we can use UOPS_RETIRED.ALL
3569                 * (0x01c2), which is a PEBS capable event, to get the same
3570                 * count.
3571                 *
3572                 * UOPS_RETIRED.ALL counts the number of cycles that retire
3573                 * at least CNTMASK micro-ops. By setting CNTMASK to a value
3574                 * (16) larger than the maximum number of micro-ops that can
3575                 * be retired per cycle (4) and then inverting the condition,
3576                 * we count all cycles that retire fewer than 16 micro-ops,
3577                 * which is every cycle.
3578                 *
3579                 * Thereby we gain a PEBS capable cycle counter.
3580                 */
3581                u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
3582
3583                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3584                event->hw.config = alt_config;
3585        }
3586}
3587
3588static void intel_pebs_aliases_precdist(struct perf_event *event)
3589{
3590        if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
3591                /*
3592                 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
3593                 * (0x003c) so that we can use it with PEBS.
3594                 *
3595                 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
3596                 * PEBS capable. However we can use INST_RETIRED.PREC_DIST
3597                 * (0x01c0), which is a PEBS capable event, to get the same
3598                 * count.
3599                 *
3600                 * The PREC_DIST event has special support to minimize sample
3601                 * shadowing effects. One drawback is that it can only be
3602                 * programmed on counter 1, but that seems like an
3603                 * acceptable trade off.
3604                 */
3605                u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);
3606
3607                alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
3608                event->hw.config = alt_config;
3609        }
3610}
3611
3612static void intel_pebs_aliases_ivb(struct perf_event *event)
3613{
3614        if (event->attr.precise_ip < 3)
3615                return intel_pebs_aliases_snb(event);
3616        return intel_pebs_aliases_precdist(event);
3617}
3618
3619static void intel_pebs_aliases_skl(struct perf_event *event)
3620{
3621        if (event->attr.precise_ip < 3)
3622                return intel_pebs_aliases_core2(event);
3623        return intel_pebs_aliases_precdist(event);
3624}
3625
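/*
 * Large (multi-record) PEBS is only usable when every requested sample
 * field can be reconstructed from the PEBS record itself. This helper
 * returns the subset of x86_pmu.large_pebs_flags that still applies to
 * the event; intel_pmu_hw_config() only sets PERF_X86_EVENT_LARGE_PEBS
 * when the event's sample_type fits entirely within that subset.
 */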
3626static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
3627{
3628        unsigned long flags = x86_pmu.large_pebs_flags;
3629
3630        if (event->attr.use_clockid)
3631                flags &= ~PERF_SAMPLE_TIME;
3632        if (!event->attr.exclude_kernel)
3633                flags &= ~PERF_SAMPLE_REGS_USER;
3634        if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
3635                flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
3636        return flags;
3637}
3638
3639static int intel_pmu_bts_config(struct perf_event *event)
3640{
3641        struct perf_event_attr *attr = &event->attr;
3642
3643        if (unlikely(intel_pmu_has_bts(event))) {
3644                /* BTS is not supported by this architecture. */
3645                if (!x86_pmu.bts_active)
3646                        return -EOPNOTSUPP;
3647
3648                /* BTS is currently only allowed for user-mode. */
3649                if (!attr->exclude_kernel)
3650                        return -EOPNOTSUPP;
3651
3652                /* BTS is not allowed for precise events. */
3653                if (attr->precise_ip)
3654                        return -EOPNOTSUPP;
3655
3656                /* disallow bts if conflicting events are present */
3657                if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3658                        return -EBUSY;
3659
3660                event->destroy = hw_perf_lbr_event_destroy;
3661        }
3662
3663        return 0;
3664}
3665
3666static int core_pmu_hw_config(struct perf_event *event)
3667{
3668        int ret = x86_pmu_hw_config(event);
3669
3670        if (ret)
3671                return ret;
3672
3673        return intel_pmu_bts_config(event);
3674}
3675
3676#define INTEL_TD_METRIC_AVAILABLE_MAX   (INTEL_TD_METRIC_RETIRING + \
3677                                         ((x86_pmu.num_topdown_events - 1) << 8))
3678
3679static bool is_available_metric_event(struct perf_event *event)
3680{
3681        return is_metric_event(event) &&
3682                event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
3683}
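/*
 * The topdown metric pseudo-encodings are spaced 0x100 apart starting
 * at INTEL_TD_METRIC_RETIRING (values defined in the perf_event
 * headers, not shown here), so the limit above accepts exactly the
 * first num_topdown_events metrics supported by this PMU.
 */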
3684
3685static inline bool is_mem_loads_event(struct perf_event *event)
3686{
3687        return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
3688}
3689
3690static inline bool is_mem_loads_aux_event(struct perf_event *event)
3691{
3692        return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
3693}
3694
3695static inline bool require_mem_loads_aux_event(struct perf_event *event)
3696{
3697        if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
3698                return false;
3699
3700        if (is_hybrid())
3701                return hybrid_pmu(event->pmu)->cpu_type == hybrid_big;
3702
3703        return true;
3704}
3705
3706static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
3707{
3708        union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);
3709
3710        return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
3711}
3712
3713static int intel_pmu_hw_config(struct perf_event *event)
3714{
3715        int ret = x86_pmu_hw_config(event);
3716
3717        if (ret)
3718                return ret;
3719
3720        ret = intel_pmu_bts_config(event);
3721        if (ret)
3722                return ret;
3723
3724        if (event->attr.precise_ip) {
3725                if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
3726                        return -EINVAL;
3727
3728                if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
3729                        event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
3730                        if (!(event->attr.sample_type &
3731                              ~intel_pmu_large_pebs_flags(event))) {
3732                                event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
3733                                event->attach_state |= PERF_ATTACH_SCHED_CB;
3734                        }
3735                }
3736                if (x86_pmu.pebs_aliases)
3737                        x86_pmu.pebs_aliases(event);
3738
3739                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3740                        event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
3741        }
3742
3743        if (needs_branch_stack(event)) {
3744                ret = intel_pmu_setup_lbr_filter(event);
3745                if (ret)
3746                        return ret;
3747                event->attach_state |= PERF_ATTACH_SCHED_CB;
3748
3749                /*
3750                 * BTS is set up earlier in this path, so don't account twice
3751                 */
3752                if (!unlikely(intel_pmu_has_bts(event))) {
3753                        /* disallow lbr if conflicting events are present */
3754                        if (x86_add_exclusive(x86_lbr_exclusive_lbr))
3755                                return -EBUSY;
3756
3757                        event->destroy = hw_perf_lbr_event_destroy;
3758                }
3759        }
3760
3761        if (event->attr.aux_output) {
3762                if (!event->attr.precise_ip)
3763                        return -EINVAL;
3764
3765                event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
3766        }
3767
3768        if ((event->attr.type == PERF_TYPE_HARDWARE) ||
3769            (event->attr.type == PERF_TYPE_HW_CACHE))
3770                return 0;
3771
3772        /*
3773         * Config Topdown slots and metric events
3774         *
3775         * The slots event on Fixed Counter 3 can support sampling,
3776         * which will be handled normally in x86_perf_event_update().
3777         *
3778         * Metric events don't support sampling and require being paired
3779         * with a slots event as group leader. When the slots event
3780         * is used in a metrics group, it too cannot support sampling.
3781         */
3782        if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
3783                if (event->attr.config1 || event->attr.config2)
3784                        return -EINVAL;
3785
3786                /*
3787                 * The TopDown metrics events and slots event don't
3788                 * support any filters.
3789                 */
3790                if (event->attr.config & X86_ALL_EVENT_FLAGS)
3791                        return -EINVAL;
3792
3793                if (is_available_metric_event(event)) {
3794                        struct perf_event *leader = event->group_leader;
3795
3796                        /* The metric events don't support sampling. */
3797                        if (is_sampling_event(event))
3798                                return -EINVAL;
3799
3800                        /* The metric events require a slots group leader. */
3801                        if (!is_slots_event(leader))
3802                                return -EINVAL;
3803
3804                        /*
3805                         * The leader/SLOTS must not be a sampling event for
3806                         * metric use; hardware requires it starts at 0 when used
3807                         * in conjunction with MSR_PERF_METRICS.
3808                         */
3809                        if (is_sampling_event(leader))
3810                                return -EINVAL;
3811
3812                        event->event_caps |= PERF_EV_CAP_SIBLING;
3813                        /*
3814                         * Only once we have a METRICs sibling do we
3815                         * need TopDown magic.
3816                         */
3817                        leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
3818                        event->hw.flags  |= PERF_X86_EVENT_TOPDOWN;
3819                }
3820        }
3821
3822        /*
3823         * The load latency event X86_CONFIG(.event=0xcd, .umask=0x01) on SPR
3824         * doesn't function quite right. As a work-around it needs to always be
3825         * co-scheduled with an auxiliary event X86_CONFIG(.event=0x03, .umask=0x82).
3826         * The actual count of this second event is irrelevant; it just needs
3827         * to be active to make the first event function correctly.
3828         *
3829         * In a group, the auxiliary event must come before the load latency
3830         * event. This rule simplifies the implementation of the check, because
3831         * perf may not see the complete group at this point.
3832         */
3833        if (require_mem_loads_aux_event(event) &&
3834            (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
3835            is_mem_loads_event(event)) {
3836                struct perf_event *leader = event->group_leader;
3837                struct perf_event *sibling = NULL;
3838
3839                if (!is_mem_loads_aux_event(leader)) {
3840                        for_each_sibling_event(sibling, leader) {
3841                                if (is_mem_loads_aux_event(sibling))
3842                                        break;
3843                        }
3844                        if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
3845                                return -ENODATA;
3846                }
3847        }
3848
3849        if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
3850                return 0;
3851
3852        if (x86_pmu.version < 3)
3853                return -EINVAL;
3854
3855        ret = perf_allow_cpu(&event->attr);
3856        if (ret)
3857                return ret;
3858
3859        event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
3860
3861        return 0;
3862}
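/*
 * Illustrative note on the SPR pairing rule enforced above: when the
 * load latency event samples PERF_SAMPLE_DATA_SRC, it has to sit in a
 * group such as {mem-loads-aux,mem-loads} (sysfs event names assumed
 * here) with the auxiliary event first, otherwise event creation fails
 * with -ENODATA.
 */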
3863
3864static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
3865{
3866        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3867        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3868        u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
3869
3870        arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
3871        arr[0].host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
3872        arr[0].guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask;
3873        if (x86_pmu.flags & PMU_FL_PEBS_ALL)
3874                arr[0].guest &= ~cpuc->pebs_enabled;
3875        else
3876                arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
3877        *nr = 1;
3878
3879        if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
3880                /*
3881                 * If a PMU counter has PEBS enabled, it is not enough to
3882                 * disable the counter on guest entry, since a PEBS memory
3883                 * write can overshoot guest entry and corrupt guest
3884                 * memory. Disabling PEBS solves the problem.
3885                 *
3886                 * Don't do this if the CPU already enforces it.
3887                 */
3888                arr[1].msr = MSR_IA32_PEBS_ENABLE;
3889                arr[1].host = cpuc->pebs_enabled;
3890                arr[1].guest = 0;
3891                *nr = 2;
3892        }
3893
3894        return arr;
3895}
3896
3897static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
3898{
3899        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3900        struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
3901        int idx;
3902
3903        for (idx = 0; idx < x86_pmu.num_counters; idx++)  {
3904                struct perf_event *event = cpuc->events[idx];
3905
3906                arr[idx].msr = x86_pmu_config_addr(idx);
3907                arr[idx].host = arr[idx].guest = 0;
3908
3909                if (!test_bit(idx, cpuc->active_mask))
3910                        continue;
3911
3912                arr[idx].host = arr[idx].guest =
3913                        event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
3914
3915                if (event->attr.exclude_host)
3916                        arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3917                else if (event->attr.exclude_guest)
3918                        arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
3919        }
3920
3921        *nr = x86_pmu.num_counters;
3922        return arr;
3923}
3924
3925static void core_pmu_enable_event(struct perf_event *event)
3926{
3927        if (!event->attr.exclude_host)
3928                x86_pmu_enable_event(event);
3929}
3930
3931static void core_pmu_enable_all(int added)
3932{
3933        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
3934        int idx;
3935
3936        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
3937                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
3938
3939                if (!test_bit(idx, cpuc->active_mask) ||
3940                                cpuc->events[idx]->attr.exclude_host)
3941                        continue;
3942
3943                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
3944        }
3945}
3946
3947static int hsw_hw_config(struct perf_event *event)
3948{
3949        int ret = intel_pmu_hw_config(event);
3950
3951        if (ret)
3952                return ret;
3953        if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
3954                return 0;
3955        event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
3956
3957        /*
3958         * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
3959         * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
3960         * this combination.
3961         */
3962        if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
3963             ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
3964              event->attr.precise_ip > 0))
3965                return -EOPNOTSUPP;
3966
3967        if (event_is_checkpointed(event)) {
3968                /*
3969                 * Sampling of checkpointed events can cause situations where
3970                 * the CPU constantly aborts because of an overflow, which is
3971                 * then checkpointed back and ignored. Forbid checkpointing
3972                 * for sampling.
3973                 *
3974                 * But still allow a long sampling period, so that perf stat
3975                 * from KVM works.
3976                 */
3977                if (event->attr.sample_period > 0 &&
3978                    event->attr.sample_period < 0x7fffffff)
3979                        return -EOPNOTSUPP;
3980        }
3981        return 0;
3982}
3983
3984static struct event_constraint counter0_constraint =
3985                        INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);
3986
3987static struct event_constraint counter2_constraint =
3988                        EVENT_CONSTRAINT(0, 0x4, 0);
3989
3990static struct event_constraint fixed0_constraint =
3991                        FIXED_EVENT_CONSTRAINT(0x00c0, 0);
3992
3993static struct event_constraint fixed0_counter0_constraint =
3994                        INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);
3995
3996static struct event_constraint *
3997hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
3998                          struct perf_event *event)
3999{
4000        struct event_constraint *c;
4001
4002        c = intel_get_event_constraints(cpuc, idx, event);
4003
4004        /* Handle special quirk on in_tx_checkpointed only in counter 2 */
4005        if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
4006                if (c->idxmsk64 & (1U << 2))
4007                        return &counter2_constraint;
4008                return &emptyconstraint;
4009        }
4010
4011        return c;
4012}
4013
4014static struct event_constraint *
4015icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4016                          struct perf_event *event)
4017{
4018        /*
4019         * Fixed counter 0 has less skid.
4020         * Force instruction:ppp in Fixed counter 0
4021         */
4022        if ((event->attr.precise_ip == 3) &&
4023            constraint_match(&fixed0_constraint, event->hw.config))
4024                return &fixed0_constraint;
4025
4026        return hsw_get_event_constraints(cpuc, idx, event);
4027}
4028
4029static struct event_constraint *
4030spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4031                          struct perf_event *event)
4032{
4033        struct event_constraint *c;
4034
4035        c = icl_get_event_constraints(cpuc, idx, event);
4036
4037        /*
4038         * The :ppp indicates the Precise Distribution (PDist) facility, which
4039         * is only supported on GP counter 0. If GP counter 0 is not
4040         * available for a :ppp event, error out.
4041         * Exception: Instruction PDIR is only available on the fixed counter 0.
4042         */
4043        if ((event->attr.precise_ip == 3) &&
4044            !constraint_match(&fixed0_constraint, event->hw.config)) {
4045                if (c->idxmsk64 & BIT_ULL(0))
4046                        return &counter0_constraint;
4047
4048                return &emptyconstraint;
4049        }
4050
4051        return c;
4052}
4053
4054static struct event_constraint *
4055glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4056                          struct perf_event *event)
4057{
4058        struct event_constraint *c;
4059
4060        /* :ppp means to do reduced skid PEBS which is PMC0 only. */
4061        if (event->attr.precise_ip == 3)
4062                return &counter0_constraint;
4063
4064        c = intel_get_event_constraints(cpuc, idx, event);
4065
4066        return c;
4067}
4068
4069static struct event_constraint *
4070tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4071                          struct perf_event *event)
4072{
4073        struct event_constraint *c;
4074
4075        /*
4076         * :ppp means to do reduced skid PEBS,
4077         * which is available on PMC0 and fixed counter 0.
4078         */
4079        if (event->attr.precise_ip == 3) {
4080                /* Force instruction:ppp on PMC0 and Fixed counter 0 */
4081                if (constraint_match(&fixed0_constraint, event->hw.config))
4082                        return &fixed0_counter0_constraint;
4083
4084                return &counter0_constraint;
4085        }
4086
4087        c = intel_get_event_constraints(cpuc, idx, event);
4088
4089        return c;
4090}
4091
4092static bool allow_tsx_force_abort = true;
4093
4094static struct event_constraint *
4095tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4096                          struct perf_event *event)
4097{
4098        struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
4099
4100        /*
4101         * Without TFA we must not use PMC3.
4102         */
4103        if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
4104                c = dyn_constraint(cpuc, c, idx);
4105                c->idxmsk64 &= ~(1ULL << 3);
4106                c->weight--;
4107        }
4108
4109        return c;
4110}
4111
4112static struct event_constraint *
4113adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
4114                          struct perf_event *event)
4115{
4116        struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4117
4118        if (pmu->cpu_type == hybrid_big)
4119                return spr_get_event_constraints(cpuc, idx, event);
4120        else if (pmu->cpu_type == hybrid_small)
4121                return tnt_get_event_constraints(cpuc, idx, event);
4122
4123        WARN_ON(1);
4124        return &emptyconstraint;
4125}
4126
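    /*
     * Same split for the config validation: big cores take the Haswell-
     * derived path (which also sanity-checks the TSX in_tx/in_tx_cp
     * bits), small cores the common Intel path.
     */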
4127static int adl_hw_config(struct perf_event *event)
4128{
4129        struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);
4130
4131        if (pmu->cpu_type == hybrid_big)
4132                return hsw_hw_config(event);
4133        else if (pmu->cpu_type == hybrid_small)
4134                return intel_pmu_hw_config(event);
4135
4136        WARN_ON(1);
4137        return -EOPNOTSUPP;
4138}
4139
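    /*
     * Default hybrid CPU type, used by init_hybrid_pmu() when the
     * hardware does not report one; such CPUs are treated as big cores.
     */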
4140static u8 adl_get_hybrid_cpu_type(void)
4141{
4142        return hybrid_big;
4143}
4144
4145/*
4146 * Broadwell:
4147 *
4148 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
4149 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
4150 * the two to enforce a minimum period of 128 (the smallest value that has bits
4151 * 0-5 cleared and >= 100).
4152 *
4153 * Because of how the code in x86_perf_event_set_period() works, the truncation
4154 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
4155 * to make up for the 'lost' events due to carrying the 'error' in period_left.
4156 *
4157 * Therefore the effective (average) period matches the requested period,
4158 * despite coarser hardware granularity.
4159 */
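    /*
     * For example, a requested period of 100 is raised to 128, while a
     * requested period of 200 is truncated to 192 (200 & ~0x3f).
     */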
4160static u64 bdw_limit_period(struct perf_event *event, u64 left)
4161{
4162        if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
4163                        X86_CONFIG(.event=0xc0, .umask=0x01)) {
4164                if (left < 128)
4165                        left = 128;
4166                left &= ~0x3fULL;
4167        }
4168        return left;
4169}
4170
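    /*
     * Nehalem needs a minimum sampling period of 32; clamp anything
     * smaller.
     */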
4171static u64 nhm_limit_period(struct perf_event *event, u64 left)
4172{
4173        return max(left, 32ULL);
4174}
4175
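    /*
     * Sapphire Rapids: PDist (:ppp) events need a period of at least
     * 128, enforce that for precise_ip == 3.
     */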
4176static u64 spr_limit_period(struct perf_event *event, u64 left)
4177{
4178        if (event->attr.precise_ip == 3)
4179                return max(left, 128ULL);
4180
4181        return left;
4182}
4183
4184PMU_FORMAT_ATTR(event,  "config:0-7"    );
4185PMU_FORMAT_ATTR(umask,  "config:8-15"   );
4186PMU_FORMAT_ATTR(edge,   "config:18"     );
4187PMU_FORMAT_ATTR(pc,     "config:19"     );
4188PMU_FORMAT_ATTR(any,    "config:21"     ); /* v3 + */
4189PMU_FORMAT_ATTR(inv,    "config:23"     );
4190PMU_FORMAT_ATTR(cmask,  "config:24-31"  );
4191PMU_FORMAT_ATTR(in_tx,  "config:32");
4192PMU_FORMAT_ATTR(in_tx_cp, "config:33");
4193
4194static struct attribute *intel_arch_formats_attr[] = {
4195        &format_attr_event.attr,
4196        &format_attr_umask.attr,
4197        &format_attr_edge.attr,
4198        &format_attr_pc.attr,
4199        &format_attr_inv.attr,
4200        &format_attr_cmask.attr,
4201        NULL,
4202};
4203
4204ssize_t intel_event_sysfs_show(char *page, u64 config)
4205{
4206        u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
4207
4208        return x86_event_sysfs_show(page, config, event);
4209}
4210
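    /*
     * Allocate the per-core structure tracking the extra MSRs (e.g.
     * offcore response, LBR select) that are shared between HT siblings;
     * the allocation is node-local to the CPU being brought up.
     */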
4211static struct intel_shared_regs *allocate_shared_regs(int cpu)
4212{
4213        struct intel_shared_regs *regs;
4214        int i;
4215
4216        regs = kzalloc_node(sizeof(struct intel_shared_regs),
4217                            GFP_KERNEL, cpu_to_node(cpu));
4218        if (regs) {
4219                /*
4220                 * initialize the locks to keep lockdep happy
4221                 */
4222                for (i = 0; i < EXTRA_REG_MAX; i++)
4223                        raw_spin_lock_init(&regs->regs[i].lock);
4224
4225                regs->core_id = -1;
4226        }
4227        return regs;
4228}
4229
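    /*
     * Per-core state for the exclusive-counter (PMU_FL_EXCL_CNTRS)
     * workaround, likewise shared between HT siblings on the same core.
     */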
4230static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
4231{
4232        struct intel_excl_cntrs *c;
4233
4234        c = kzalloc_node(sizeof(struct intel_excl_cntrs),
4235                         GFP_KERNEL, cpu_to_node(cpu));
4236        if (c) {
4237                raw_spin_lock_init(&c->lock);
4238                c->core_id = -1;
4239        }
4240        return c;
4241}
4242
4243
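    /*
     * Allocate the per-CPU scheduling state (shared regs, dynamic
     * constraint list, exclusive counters) needed before events can be
     * scheduled on this CPU; on failure, everything allocated so far is
     * released again.
     */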
4244int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
4245{
4246        cpuc->pebs_record_size = x86_pmu.pebs_record_size;
4247
4248        if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
4249                cpuc->shared_regs = allocate_shared_regs(cpu);
4250                if (!cpuc->shared_regs)
4251                        goto err;
4252        }
4253
4254        if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
4255                size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
4256
4257                cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
4258                if (!cpuc->constraint_list)
4259                        goto err_shared_regs;
4260        }
4261
4262        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4263                cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
4264                if (!cpuc->excl_cntrs)
4265                        goto err_constraint_list;
4266
4267                cpuc->excl_thread_id = 0;
4268        }
4269
4270        return 0;
4271
4272err_constraint_list:
4273        kfree(cpuc->constraint_list);
4274        cpuc->constraint_list = NULL;
4275
4276err_shared_regs:
4277        kfree(cpuc->shared_regs);
4278        cpuc->shared_regs = NULL;
4279
4280err:
4281        return -ENOMEM;
4282}
4283
4284static int intel_pmu_cpu_prepare(int cpu)
4285{
4286        return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
4287}
4288
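    /*
     * Set or clear DEBUGCTLMSR.FREEZE_IN_SMM on the local CPU; called
     * on each CPU when the freeze_on_smi attribute is written, and
     * directly from intel_pmu_cpu_starting() below.
     */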
4289static void flip_smm_bit(void *data)
4290{
4291        unsigned long set = *(unsigned long *)data;
4292
4293        if (set > 0) {
4294                msr_set_bit(MSR_IA32_DEBUGCTLMSR,
4295                            DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4296        } else {
4297                msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
4298                              DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
4299        }
4300}
4301
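    /*
     * Associate an onlining CPU with the hybrid PMU matching its core
     * type. The first CPU of each type also validates that the counters
     * exist and prints the PMU capabilities.
     */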
4302static bool init_hybrid_pmu(int cpu)
4303{
4304        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4305        u8 cpu_type = get_this_hybrid_cpu_type();
4306        struct x86_hybrid_pmu *pmu = NULL;
4307        int i;
4308
4309        if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
4310                cpu_type = x86_pmu.get_hybrid_cpu_type();
4311
4312        for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
4313                if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) {
4314                        pmu = &x86_pmu.hybrid_pmu[i];
4315                        break;
4316                }
4317        }
4318        if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
4319                cpuc->pmu = NULL;
4320                return false;
4321        }
4322
4323        /* Only check and dump the PMU information for the first CPU */
4324        if (!cpumask_empty(&pmu->supported_cpus))
4325                goto end;
4326
4327        if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
4328                return false;
4329
4330        pr_info("%s PMU driver: ", pmu->name);
4331
4332        if (pmu->intel_cap.pebs_output_pt_available)
4333                pr_cont("PEBS-via-PT ");
4334
4335        pr_cont("\n");
4336
4337        x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
4338                             pmu->intel_ctrl);
4339
4340end:
4341        cpumask_set_cpu(cpu, &pmu->supported_cpus);
4342        cpuc->pmu = &pmu->pmu;
4343
4344        x86_pmu_update_cpu_context(&pmu->pmu, cpu);
4345
4346        return true;
4347}
4348
4349static void intel_pmu_cpu_starting(int cpu)
4350{
4351        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
4352        int core_id = topology_core_id(cpu);
4353        int i;
4354
4355        if (is_hybrid() && !init_hybrid_pmu(cpu))
4356                return;
4357
4358        init_debug_store_on_cpu(cpu);
4359        /*
4360         * Deal with CPUs that don't clear their LBRs on power-up.
4361         */
4362        intel_pmu_lbr_reset();
4363
4364        cpuc->lbr_sel = NULL;
4365
4366        if (x86_pmu.flags & PMU_FL_TFA) {
4367                WARN_ON_ONCE(cpuc->tfa_shadow);
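                    /*
                     * Start with an impossible shadow value so that the
                     * intel_set_tfa() below is guaranteed to write the MSR.
                     */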
4368                cpuc->tfa_shadow = ~0ULL;
4369                intel_set_tfa(cpuc, false);
4370        }
4371
4372        if (x86_pmu.version > 1)
4373                flip_smm_bit(&x86_pmu.attr_freeze_on_smi);
4374
4375        /*
4376         * Disable perf metrics if any added CPU doesn't support it.
4377         *
4378         * Skip the check on a hybrid architecture, because the
4379         * architectural MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
4380         * architectural features. Perf metrics is a model-specific
4381         * feature for now, and the corresponding bit should always be
4382         * 0 on a hybrid platform, e.g., Alder Lake.
4383         */
4384        if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
4385                union perf_capabilities perf_cap;
4386
4387                rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
4388                if (!perf_cap.perf_metrics) {
4389                        x86_pmu.intel_cap.perf_metrics = 0;
4390                        x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
4391                }
4392        }
4393
4394        if (!cpuc->shared_regs)
4395                return;
4396
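            /*
             * Unless HT sharing is disabled, adopt the shared_regs of an
             * already-online sibling on this core and queue our own copy
             * to be freed once this CPU is fully online.
             */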
4397        if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
4398                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4399                        struct intel_shared_regs *pc;
4400
4401                        pc = per_cpu(cpu_hw_events, i).shared_regs;
4402                        if (pc && pc->core_id == core_id) {
4403                                cpuc->kfree_on_online[0] = cpuc->shared_regs;
4404                                cpuc->shared_regs = pc;
4405                                break;
4406                        }
4407                }
4408                cpuc->shared_regs->core_id = core_id;
4409                cpuc->shared_regs->refcnt++;
4410        }
4411
4412        if (x86_pmu.lbr_sel_map)
4413                cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
4414
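            /*
             * Likewise share the exclusive-counter state with an online
             * sibling; the second thread of the pair gets excl_thread_id = 1.
             */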
4415        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
4416                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
4417                        struct cpu_hw_events *sibling;
4418                        struct intel_excl_cntrs *c;
4419
4420                        sibling = &per_cpu(cpu_hw_events, i);
4421                        c = sibling->excl_cntrs;
4422                        if (c && c->core_id == core_id) {
4423                                cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
4424                                cpuc->excl_cntrs = c;
4425                                if (!sibling->excl_thread_id)
4426                                        cpuc->excl_thread_id = 1;
4427                                break;
4428                        }
4429                }
4430                cpuc->excl_cntrs->core_id = core_id;
4431                cpuc->excl_cntrs->refcnt++;
4432        }
4433}
4434
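    /*
     * Drop this CPU's reference on the per-core exclusive-counter state,
     * freeing it along with the last sibling, and release the per-CPU
     * dynamic constraint list.
     */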
4435static void free_excl_cntrs(struct cpu_hw_events *cpuc)
4436{
4437        struct intel_excl_cntrs *c;
4438
4439        c = cpuc->excl_cntrs;
4440        if (c) {
4441                if (c->core_id == -1 || --c->refcnt == 0)
4442                        kfree(c);
4443                cpuc->excl_cntrs = NULL;
4444        }
4445
4446        kfree(cpuc->constraint_list);
4447        cpuc->constraint_list = NULL;
4448}
4449
4450static void intel_pmu_cpu_dying(int cpu)
4451{
4452        fini_debug_store_on_cpu(cpu);
4453}
4454
4455void intel_cpuc_finish(struct cpu_hw_events *cpuc)
4456{
4457        struct intel_shared_regs *pc;