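/*
 * Intel PMU (perf events) core driver: the architectural event map plus
 * per-model event constraint and extra-register tables for Intel CPUs.
 */
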
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nmi.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/intel-family.h>
#include <asm/intel_pt.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>

#include "../perf_event.h"

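/*
 * Intel PerfMon, used on Core and later.
 */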
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300,
};

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2),
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),
	INTEL_EVENT_CONSTRAINT(0x19, 0x2),
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),
	INTEL_EVENT_CONSTRAINT(0x11, 0x2),
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),
	INTEL_EVENT_CONSTRAINT(0x18, 0x1),
	INTEL_EVENT_CONSTRAINT(0x19, 0x2),
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1),
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1),
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_EVENT_CONSTRAINT(0x40, 0x3),
	INTEL_EVENT_CONSTRAINT(0x41, 0x3),
	INTEL_EVENT_CONSTRAINT(0x42, 0x3),
	INTEL_EVENT_CONSTRAINT(0x43, 0x3),
	INTEL_EVENT_CONSTRAINT(0x48, 0x3),
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3),
	INTEL_EVENT_CONSTRAINT(0x51, 0x3),
	INTEL_EVENT_CONSTRAINT(0x63, 0x3),
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
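	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */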
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_EVENT_CONSTRAINT(0x51, 0x3),
	INTEL_EVENT_CONSTRAINT(0x60, 0x1),
	INTEL_EVENT_CONSTRAINT(0x63, 0x3),
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4),
	INTEL_EVENT_CONSTRAINT(0x48, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4),

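	/*
	 * When HT is off these events can only run on the bottom 4 counters.
	 */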
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),

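	/*
	 * Same with HT off: these events can only run on the bottom 4 counters.
	 */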
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_UEVENT_CONSTRAINT(0x1c0, 0x2),

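	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */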
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),
	INTEL_EVENT_CONSTRAINT(0xc6, 0xf),

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_skl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
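	/*
	 * Note the low 8 bits of the eventsel code are not a continuous
	 * field; they contain some #GPing bits, which are masked out.
	 */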
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_icl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x01c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	FIXED_EVENT_CONSTRAINT(0x0400, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	INTEL_EVENT_CONSTRAINT_RANGE(0x03, 0x0a, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x1f, 0x28, 0xf),
	INTEL_EVENT_CONSTRAINT(0x32, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff),
	INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff),
	INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff),
	INTEL_EVENT_CONSTRAINT(0xa3, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
	INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	EVENT_EXTRA_END
};

static struct extra_reg intel_spr_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x012a, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x012b, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
	INTEL_UEVENT_EXTRA_REG(0x40ad, MSR_PEBS_FRONTEND, 0x7, FE),
	INTEL_UEVENT_EXTRA_REG(0x04c2, MSR_PEBS_FRONTEND, 0x8, FE),
	EVENT_EXTRA_END
};

static struct event_constraint intel_spr_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x01c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	FIXED_EVENT_CONSTRAINT(0x0400, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_EVENT_CONSTRAINT(0x2e, 0xff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0xff),

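	/*
	 * Generally event codes < 0x90 are restricted to counters 0-3.
	 * The 0x2E and 0x3C events above are the exceptions.
	 */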
	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x1),
	INTEL_EVENT_CONSTRAINT(0xce, 0x1),
	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),

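	/*
	 * Generally event codes >= 0x90 are likely to have no restrictions;
	 * the exceptions are defined above.
	 */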
	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0xff),

	EVENT_CONSTRAINT_END
};

EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");

static struct attribute *nhm_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
};

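/*
 * Topdown events for Intel Core CPUs.
 *
 * The events are all in slots, which is a free slot in a 4 wide
 * pipeline. Some events are already reported in slots, for cycle
 * events we multiply by the pipeline width (4).
 *
 * With Hyper Threading on, topdown metrics are either summed or averaged
 * between the threads of a core: (count_t0 + count_t1).
 *
 * For the average case the metric is always scaled to pipeline width,
 * so we use factor 2 ((count_t0 + count_t1) / 2 * 4).
 */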
EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
	"event=0x3c,umask=0x0",
	"event=0x3c,umask=0x0,any=1");
EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
	"event=0xe,umask=0x1");
EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
	"event=0xc2,umask=0x2");
EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
	"event=0x9c,umask=0x1");
EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
	"event=0xd,umask=0x3,cmask=1",
	"event=0xd,umask=0x3,cmask=1,any=1");
EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
	"4", "2");

EVENT_ATTR_STR(slots, slots, "event=0x00,umask=0x4");
EVENT_ATTR_STR(topdown-retiring, td_retiring, "event=0x00,umask=0x80");
EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec, "event=0x00,umask=0x81");
EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound, "event=0x00,umask=0x82");
EVENT_ATTR_STR(topdown-be-bound, td_be_bound, "event=0x00,umask=0x83");
EVENT_ATTR_STR(topdown-heavy-ops, td_heavy_ops, "event=0x00,umask=0x84");
EVENT_ATTR_STR(topdown-br-mispredict, td_br_mispredict, "event=0x00,umask=0x85");
EVENT_ATTR_STR(topdown-fetch-lat, td_fetch_lat, "event=0x00,umask=0x86");
EVENT_ATTR_STR(topdown-mem-bound, td_mem_bound, "event=0x00,umask=0x87");

static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(td_slots_issued),
	EVENT_PTR(td_slots_retired),
	EVENT_PTR(td_fetch_bubbles),
	EVENT_PTR(td_total_slots),
	EVENT_PTR(td_total_slots_scale),
	EVENT_PTR(td_recovery_bubbles),
	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
};

static struct attribute *snb_mem_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
	NULL,
};

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2),
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

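	/*
	 * When HT is off these events can only run on the bottom 4 counters.
	 */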
	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf),
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf),

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),
	FIXED_EVENT_CONSTRAINT(0x003c, 1),
	FIXED_EVENT_CONSTRAINT(0x0300, 2),
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4),
	INTEL_UBIT_EVENT_CONSTRAINT(0x8a3, 0x4),
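	/*
	 * when HT is off, these can only run on the bottom 4 counters
	 */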
	INTEL_EVENT_CONSTRAINT(0xd0, 0xf),
	INTEL_EVENT_CONSTRAINT(0xd1, 0xf),
	INTEL_EVENT_CONSTRAINT(0xd2, 0xf),
	INTEL_EVENT_CONSTRAINT(0xcd, 0xf),
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

static __initconst const u64 spr_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe124,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_MISS) ] = 0xe424,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe12,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS) ] = 0xe13,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = 0xe11,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4c4,
		[ C(RESULT_MISS) ] = 0x4c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x12a,
		[ C(RESULT_MISS) ] = 0x12a,
	},
 },
};

static __initconst const u64 spr_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10001,
		[ C(RESULT_MISS) ] = 0x3fbfc00001,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x3f3ffc0002,
		[ C(RESULT_MISS) ] = 0x3f3fc00002,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x10c000001,
		[ C(RESULT_MISS) ] = 0x3fb3000001,
	},
 },
};

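/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts.
 * - icache miss does not include decoded icache
 */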
#define SKL_DEMAND_DATA_RD		BIT_ULL(0)
#define SKL_DEMAND_RFO			BIT_ULL(1)
#define SKL_ANY_RESPONSE		BIT_ULL(16)
#define SKL_SUPPLIER_NONE		BIT_ULL(17)
#define SKL_L3_MISS_LOCAL_DRAM		BIT_ULL(26)
#define SKL_L3_MISS_REMOTE_HOP0_DRAM	BIT_ULL(27)
#define SKL_L3_MISS_REMOTE_HOP1_DRAM	BIT_ULL(28)
#define SKL_L3_MISS_REMOTE_HOP2P_DRAM	BIT_ULL(29)
#define SKL_L3_MISS			(SKL_L3_MISS_LOCAL_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)
#define SKL_SPL_HIT			BIT_ULL(30)
#define SKL_SNOOP_NONE			BIT_ULL(31)
#define SKL_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define SKL_SNOOP_MISS			BIT_ULL(33)
#define SKL_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define SKL_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define SKL_SNOOP_HITM			BIT_ULL(36)
#define SKL_SNOOP_NON_DRAM		BIT_ULL(37)
#define SKL_ANY_SNOOP			(SKL_SPL_HIT|SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SNOOP_NON_DRAM)
#define SKL_DEMAND_READ			SKL_DEMAND_DATA_RD
#define SKL_SNOOP_DRAM			(SKL_SNOOP_NONE| \
					 SKL_SNOOP_NOT_NEEDED|SKL_SNOOP_MISS| \
					 SKL_SNOOP_HIT_NO_FWD|SKL_SNOOP_HIT_WITH_FWD| \
					 SKL_SNOOP_HITM|SKL_SPL_HIT)
#define SKL_DEMAND_WRITE		SKL_DEMAND_RFO
#define SKL_LLC_ACCESS			SKL_ANY_RESPONSE
#define SKL_L3_MISS_REMOTE		(SKL_L3_MISS_REMOTE_HOP0_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP1_DRAM| \
					 SKL_L3_MISS_REMOTE_HOP2P_DRAM)

static __initconst const u64 skl_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0x151,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x283,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS) ] = 0x1b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS) ] = 0x1b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0xe08,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS) ] = 0xe49,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2085,
		[ C(RESULT_MISS) ] = 0xe85,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,
		[ C(RESULT_MISS) ] = 0xc5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS) ] = 0x1b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS) ] = 0x1b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 skl_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|
				     SKL_L3_MISS|SKL_ANY_SNOOP|
				     SKL_SUPPLIER_NONE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_LLC_ACCESS|SKL_ANY_SNOOP,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
				     SKL_L3_MISS|SKL_ANY_SNOOP|
				     SKL_SUPPLIER_NONE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_READ|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_READ|
				     SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SKL_DEMAND_WRITE|
				       SKL_L3_MISS_LOCAL_DRAM|SKL_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = SKL_DEMAND_WRITE|
				     SKL_L3_MISS_REMOTE|SKL_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)

static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS) ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0,
		[ C(RESULT_MISS) ] = 0x0151,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0,
		[ C(RESULT_MISS) ] = 0x0851,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x024e,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0x0108,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS) ] = 0x0149,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085,
		[ C(RESULT_MISS) ] = 0x0185,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS) ] = 0x00c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
};

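/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 */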
#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)

static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0x151,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS) ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS) ] = 0x1b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS) ] = 0x1b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,
		[ C(RESULT_MISS) ] = 0x108,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,
		[ C(RESULT_MISS) ] = 0x149,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085,
		[ C(RESULT_MISS) ] = 0x185,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,
		[ C(RESULT_MISS) ] = 0xc5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS) ] = 0x1b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,
		[ C(RESULT_MISS) ] = 0x1b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
				     HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
				     HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_READ|
				     HSW_L3_MISS_REMOTE|
				     HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS) ] = HSW_DEMAND_WRITE|
				     HSW_L3_MISS_REMOTE|
				     HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,
		[ C(RESULT_MISS) ] = 0x0151,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,
		[ C(RESULT_MISS) ] = 0x0251,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e,
		[ C(RESULT_MISS) ] = 0x024e,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,
		[ C(RESULT_MISS) ] = 0x0280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
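	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */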
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,
		[ C(RESULT_MISS) ] = 0x0108,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,
		[ C(RESULT_MISS) ] = 0x010c,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0,
		[ C(RESULT_MISS) ] = 0x0185,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS) ] = 0x03e8,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
};

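/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * see the IA32 SDM Vol 3B 30.6.1.3
 */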
#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)

#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)

static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b,
		[ C(RESULT_MISS) ] = 0x0151,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b,
		[ C(RESULT_MISS) ] = 0x0251,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e,
		[ C(RESULT_MISS) ] = 0x024e,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,
		[ C(RESULT_MISS) ] = 0x0280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
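	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */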
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,
		[ C(RESULT_MISS) ] = 0x0108,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,
		[ C(RESULT_MISS) ] = 0x010c,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0,
		[ C(RESULT_MISS) ] = 0x20c8,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS) ] = 0x03e8,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,
		[ C(RESULT_MISS) ] = 0x0140,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,
		[ C(RESULT_MISS) ] = 0x0141,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080,
		[ C(RESULT_MISS) ] = 0x0081,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29,
		[ C(RESULT_MISS) ] = 0x4129,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A,
		[ C(RESULT_MISS) ] = 0x412A,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40,
		[ C(RESULT_MISS) ] = 0x0208,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41,
		[ C(RESULT_MISS) ] = 0x0808,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,
		[ C(RESULT_MISS) ] = 0x1282,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS) ] = 0x00c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,
		[ C(RESULT_MISS) ] = 0x0280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29,
		[ C(RESULT_MISS) ] = 0x4129,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A,
		[ C(RESULT_MISS) ] = 0x412A,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140,
		[ C(RESULT_MISS) ] = 0x0508,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240,
		[ C(RESULT_MISS) ] = 0x0608,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,
		[ C(RESULT_MISS) ] = 0x0282,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS) ] = 0x00c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");

EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
	"event=0xca,umask=0x50");
EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");

EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
	"event=0xc2,umask=0x10");

EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
	"event=0xc2,umask=0x10");

static struct attribute *slm_events_attrs[] = {
	EVENT_PTR(td_total_slots_slm),
	EVENT_PTR(td_total_slots_scale_slm),
	EVENT_PTR(td_fetch_bubbles_slm),
	EVENT_PTR(td_fetch_bubbles_scale_slm),
	EVENT_PTR(td_slots_issued_slm),
	EVENT_PTR(td_slots_retired_slm),
	NULL
};

static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x368005ffffull, RSP_1),
	EVENT_EXTRA_END
};

#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};

static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0x0104,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380,
		[ C(RESULT_MISS) ] = 0x0280,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS) ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0x0804,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0,
		[ C(RESULT_MISS) ] = 0x40205,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4,
		[ C(RESULT_MISS) ] = 0x00c5,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

EVENT_ATTR_STR(topdown-total-slots, td_total_slots_glm, "event=0x3c");
EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_glm, "3");

EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_glm, "event=0x9c");

EVENT_ATTR_STR(topdown-recovery-bubbles, td_recovery_bubbles_glm, "event=0xca,umask=0x02");

EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_glm, "event=0xc2");

EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_glm, "event=0x0e");

static struct attribute *glm_events_attrs[] = {
	EVENT_PTR(td_total_slots_glm),
	EVENT_PTR(td_total_slots_scale_glm),
	EVENT_PTR(td_fetch_bubbles_glm),
	EVENT_PTR(td_recovery_bubbles_glm),
	EVENT_PTR(td_slots_issued_glm),
	EVENT_PTR(td_slots_retired_glm),
	NULL
};

static struct extra_reg intel_glm_extra_regs[] __read_mostly = {
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x760005ffbfull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x360005ffbfull, RSP_1),
	EVENT_EXTRA_END
};

#define GLM_DEMAND_DATA_RD		BIT_ULL(0)
#define GLM_DEMAND_RFO			BIT_ULL(1)
#define GLM_ANY_RESPONSE		BIT_ULL(16)
#define GLM_SNP_NONE_OR_MISS		BIT_ULL(33)
#define GLM_DEMAND_READ			GLM_DEMAND_DATA_RD
#define GLM_DEMAND_WRITE		GLM_DEMAND_RFO
#define GLM_DEMAND_PREFETCH		(SNB_PF_DATA_RD|SNB_PF_RFO)
#define GLM_LLC_ACCESS			GLM_ANY_RESPONSE
#define GLM_SNP_ANY			(GLM_SNP_NONE_OR_MISS|SNB_NO_FWD|SNB_HITM)
#define GLM_LLC_MISS			(GLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 glm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x81d0,
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0x82d0,
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x0380,
			[C(RESULT_MISS)] = 0x0280,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x1b7,
			[C(RESULT_MISS)] = 0x1b7,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0x1b7,
			[C(RESULT_MISS)] = 0x1b7,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x1b7,
			[C(RESULT_MISS)] = 0x1b7,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x81d0,
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0x82d0,
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x00c0,
			[C(RESULT_MISS)] = 0x0481,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x00c4,
			[C(RESULT_MISS)] = 0x00c5,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
	},
};

static __initconst const u64 glm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = GLM_DEMAND_READ|
					     GLM_LLC_ACCESS,
			[C(RESULT_MISS)] = GLM_DEMAND_READ|
					   GLM_LLC_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
					     GLM_LLC_ACCESS,
			[C(RESULT_MISS)] = GLM_DEMAND_WRITE|
					   GLM_LLC_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = GLM_DEMAND_PREFETCH|
					     GLM_LLC_ACCESS,
			[C(RESULT_MISS)] = GLM_DEMAND_PREFETCH|
					   GLM_LLC_MISS,
		},
	},
};

static __initconst const u64 glp_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x81d0,
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0x82d0,
			[C(RESULT_MISS)] = 0x0,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x0380,
			[C(RESULT_MISS)] = 0x0280,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x1b7,
			[C(RESULT_MISS)] = 0x1b7,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0x1b7,
			[C(RESULT_MISS)] = 0x1b7,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x81d0,
			[C(RESULT_MISS)] = 0xe08,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = 0x82d0,
			[C(RESULT_MISS)] = 0xe49,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x00c0,
			[C(RESULT_MISS)] = 0x0481,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = 0x00c4,
			[C(RESULT_MISS)] = 0x00c5,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = -1,
			[C(RESULT_MISS)] = -1,
		},
	},
};

static __initconst const u64 glp_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = GLM_DEMAND_READ|
					     GLM_LLC_ACCESS,
			[C(RESULT_MISS)] = GLM_DEMAND_READ|
					   GLM_LLC_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = GLM_DEMAND_WRITE|
					     GLM_LLC_ACCESS,
			[C(RESULT_MISS)] = GLM_DEMAND_WRITE|
					   GLM_LLC_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = 0x0,
			[C(RESULT_MISS)] = 0x0,
		},
	},
};

2029#define TNT_LOCAL_DRAM BIT_ULL(26)
2030#define TNT_DEMAND_READ GLM_DEMAND_DATA_RD
2031#define TNT_DEMAND_WRITE GLM_DEMAND_RFO
2032#define TNT_LLC_ACCESS GLM_ANY_RESPONSE
2033#define TNT_SNP_ANY (SNB_SNP_NOT_NEEDED|SNB_SNP_MISS| \
2034 SNB_NO_FWD|SNB_SNP_FWD|SNB_HITM)
2035#define TNT_LLC_MISS (TNT_SNP_ANY|SNB_NON_DRAM|TNT_LOCAL_DRAM)
2036
2037static __initconst const u64 tnt_hw_cache_extra_regs
2038 [PERF_COUNT_HW_CACHE_MAX]
2039 [PERF_COUNT_HW_CACHE_OP_MAX]
2040 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
2041 [C(LL)] = {
2042 [C(OP_READ)] = {
2043 [C(RESULT_ACCESS)] = TNT_DEMAND_READ|
2044 TNT_LLC_ACCESS,
2045 [C(RESULT_MISS)] = TNT_DEMAND_READ|
2046 TNT_LLC_MISS,
2047 },
2048 [C(OP_WRITE)] = {
2049 [C(RESULT_ACCESS)] = TNT_DEMAND_WRITE|
2050 TNT_LLC_ACCESS,
2051 [C(RESULT_MISS)] = TNT_DEMAND_WRITE|
2052 TNT_LLC_MISS,
2053 },
2054 [C(OP_PREFETCH)] = {
2055 [C(RESULT_ACCESS)] = 0x0,
2056 [C(RESULT_MISS)] = 0x0,
2057 },
2058 },
2059};
2060
2061EVENT_ATTR_STR(topdown-fe-bound, td_fe_bound_tnt, "event=0x71,umask=0x0");
2062EVENT_ATTR_STR(topdown-retiring, td_retiring_tnt, "event=0xc2,umask=0x0");
2063EVENT_ATTR_STR(topdown-bad-spec, td_bad_spec_tnt, "event=0x73,umask=0x6");
2064EVENT_ATTR_STR(topdown-be-bound, td_be_bound_tnt, "event=0x74,umask=0x0");
2065
2066static struct attribute *tnt_events_attrs[] = {
2067 EVENT_PTR(td_fe_bound_tnt),
2068 EVENT_PTR(td_retiring_tnt),
2069 EVENT_PTR(td_bad_spec_tnt),
2070 EVENT_PTR(td_be_bound_tnt),
2071 NULL,
2072};
2073
2074static struct extra_reg intel_tnt_extra_regs[] __read_mostly = {
2075
2076 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x800ff0ffffff9fffull, RSP_0),
2077 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0xff0ffffff9fffull, RSP_1),
2078 EVENT_EXTRA_END
2079};
2080
2081static struct extra_reg intel_grt_extra_regs[] __read_mostly = {
2082
    INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
    INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
    INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x5d0),
    EVENT_EXTRA_END
};

#define KNL_OT_L2_HITE      BIT_ULL(19)
#define KNL_OT_L2_HITF      BIT_ULL(20)
#define KNL_MCDRAM_LOCAL    BIT_ULL(21)
#define KNL_MCDRAM_FAR      BIT_ULL(22)
#define KNL_DDR_LOCAL       BIT_ULL(23)
#define KNL_DDR_FAR         BIT_ULL(24)
#define KNL_DRAM_ANY        (KNL_MCDRAM_LOCAL | KNL_MCDRAM_FAR | \
                             KNL_DDR_LOCAL | KNL_DDR_FAR)
#define KNL_L2_READ         SLM_DMND_READ
#define KNL_L2_WRITE        SLM_DMND_WRITE
#define KNL_L2_PREFETCH     SLM_DMND_PREFETCH
#define KNL_L2_ACCESS       SLM_LLC_ACCESS
#define KNL_L2_MISS         (KNL_OT_L2_HITE | KNL_OT_L2_HITF | \
                             KNL_DRAM_ANY | SNB_SNP_ANY | \
                             SNB_NON_DRAM)

static __initconst const u64 knl_hw_cache_extra_regs
                [PERF_COUNT_HW_CACHE_MAX]
                [PERF_COUNT_HW_CACHE_OP_MAX]
                [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
    [C(LL)] = {
        [C(OP_READ)] = {
            [C(RESULT_ACCESS)] = KNL_L2_READ | KNL_L2_ACCESS,
            [C(RESULT_MISS)] = 0,
        },
        [C(OP_WRITE)] = {
            [C(RESULT_ACCESS)] = KNL_L2_WRITE | KNL_L2_ACCESS,
            [C(RESULT_MISS)] = KNL_L2_WRITE | KNL_L2_MISS,
        },
        [C(OP_PREFETCH)] = {
            [C(RESULT_ACCESS)] = KNL_L2_PREFETCH | KNL_L2_ACCESS,
            [C(RESULT_MISS)] = KNL_L2_PREFETCH | KNL_L2_MISS,
        },
    },
};
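/*
 * Core PMU disable path. Safe to use from PMIs where the LBRs are already
 * disabled; it may also be called repeatedly, since writing the same
 * disable value to MSR_CORE_PERF_GLOBAL_CTRL leaves the PMU state unchanged.
 */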
static void __intel_pmu_disable_all(void)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

    if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
        intel_pmu_disable_bts();
}

static void intel_pmu_disable_all(void)
{
    __intel_pmu_disable_all();
    intel_pmu_pebs_disable_all();
    intel_pmu_lbr_disable_all();
}

static void __intel_pmu_enable_all(int added, bool pmi)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
    u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);

    intel_pmu_lbr_enable_all(pmi);
    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
           intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

    if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
        struct perf_event *event =
            cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

        if (WARN_ON_ONCE(!event))
            return;

        intel_pmu_enable_bts(event->hw.config);
    }
}

static void intel_pmu_enable_all(int added)
{
    intel_pmu_pebs_enable_all();
    __intel_pmu_enable_all(added, false);
}
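/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * These chips need their counter state 'reset' when adding counters, by
 * briefly programming the magic (non-counting) events below into all four
 * general-purpose counters.
 */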
static void intel_pmu_nhm_workaround(void)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
    static const unsigned long nhm_magic[4] = {
        0x4300B5,
        0x4300D2,
        0x4300B1,
        0x4300B1
    };
    struct perf_event *event;
    int i;

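    /*
     * The workaround, as implemented below:
     *  1) update the counts of any events already in the four GP counters;
     *  2) program the magic events into all four PERFEVTSELx registers and
     *     clear the corresponding PMCx counters;
     *  3) pulse MSR_CORE_PERF_GLOBAL_CTRL (enable, then disable) so the
     *     magic events are observed by the hardware;
     *  4) re-arm and re-enable the original events, or clear the selector
     *     if the counter was unused.
     */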
    for (i = 0; i < 4; i++) {
        event = cpuc->events[i];
        if (event)
            x86_perf_event_update(event);
    }

    for (i = 0; i < 4; i++) {
        wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
        wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
    }

    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
    wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

    for (i = 0; i < 4; i++) {
        event = cpuc->events[i];

        if (event) {
            x86_perf_event_set_period(event);
            __x86_pmu_enable_event(&event->hw,
                                   ARCH_PERFMON_EVENTSEL_ENABLE);
        } else
            wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
    }
}

static void intel_pmu_nhm_enable_all(int added)
{
    if (added)
        intel_pmu_nhm_workaround();
    intel_pmu_enable_all(added);
}

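/*
 * Cache the last value written to MSR_TSX_FORCE_ABORT in tfa_shadow so
 * that redundant (and comparatively expensive) MSR writes are skipped.
 */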
static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
{
    u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;

    if (cpuc->tfa_shadow != val) {
        cpuc->tfa_shadow = val;
        wrmsrl(MSR_TSX_FORCE_ABORT, val);
    }
}

static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
    /*
     * We're going to use PMC3, make sure TFA is set before we touch it.
     */
    if (cntr == 3)
        intel_set_tfa(cpuc, true);
}

static void intel_tfa_pmu_enable_all(int added)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

    /*
     * If we find PMC3 is no longer used when we enable the PMU, we can
     * clear TFA.
     */
    if (!test_bit(3, cpuc->active_mask))
        intel_set_tfa(cpuc, false);

    intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
    u64 status;

    rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

    return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
    wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline bool event_is_checkpointed(struct perf_event *event)
{
    return unlikely(event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}

static inline void intel_set_masks(struct perf_event *event, int idx)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

    if (event->attr.exclude_host)
        __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
    if (event->attr.exclude_guest)
        __set_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
    if (event_is_checkpointed(event))
        __set_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
}

static inline void intel_clear_masks(struct perf_event *event, int idx)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

    __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_guest_mask);
    __clear_bit(idx, (unsigned long *)&cpuc->intel_ctrl_host_mask);
    __clear_bit(idx, (unsigned long *)&cpuc->intel_cp_status);
}

static void intel_pmu_disable_fixed(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    u64 ctrl_val, mask;
    int idx = hwc->idx;

    if (is_topdown_idx(idx)) {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        /*
         * When there are other active TopDown events,
         * don't disable the SLOTS counter.
         */
        if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
            return;
        idx = INTEL_PMC_IDX_FIXED_SLOTS;
    }

    intel_clear_masks(event, idx);

    mask = 0xfULL << ((idx - INTEL_PMC_IDX_FIXED) * 4);
    rdmsrl(hwc->config_base, ctrl_val);
    ctrl_val &= ~mask;
    wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    int idx = hwc->idx;

    switch (idx) {
    case 0 ... INTEL_PMC_IDX_FIXED - 1:
        intel_clear_masks(event, idx);
        x86_pmu_disable_event(event);
        break;
    case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
    case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
        intel_pmu_disable_fixed(event);
        break;
    case INTEL_PMC_IDX_FIXED_BTS:
        intel_pmu_disable_bts();
        intel_pmu_drain_bts_buffer();
        return;
    case INTEL_PMC_IDX_FIXED_VLBR:
        intel_clear_masks(event, idx);
        break;
    default:
        intel_clear_masks(event, idx);
        pr_warn("Failed to disable the event with invalid index %d\n",
                idx);
        return;
    }

    /*
     * Needs to be called after x86_pmu_disable_event,
     * so we don't trigger the event without PEBS bit set.
     */
    if (unlikely(event->attr.precise_ip))
        intel_pmu_pebs_disable(event);
}

static void intel_pmu_del_event(struct perf_event *event)
{
    if (needs_branch_stack(event))
        intel_pmu_lbr_del(event);
    if (event->attr.precise_ip)
        intel_pmu_pebs_del(event);
}

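/*
 * The PERF_METRICS MSR is derived from fixed counter 3 (SLOTS). On a fresh
 * period (period_left still at the maximum) both MSRs and the saved copies
 * are cleared; otherwise the previously saved SLOTS and metric values are
 * restored so counting resumes where it left off.
 */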
static int icl_set_topdown_event_period(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    s64 left = local64_read(&hwc->period_left);

    /*
     * The values in the PERF_METRICS MSR are derived from fixed
     * counter 3. Clear both MSRs on initialization so the metrics
     * start from a clean state.
     */
    if (left == x86_pmu.max_period) {
        wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
        wrmsrl(MSR_PERF_METRICS, 0);
        hwc->saved_slots = 0;
        hwc->saved_metric = 0;
    }

    if ((hwc->saved_slots) && is_slots_event(event)) {
        wrmsrl(MSR_CORE_PERF_FIXED_CTR3, hwc->saved_slots);
        wrmsrl(MSR_PERF_METRICS, hwc->saved_metric);
    }

    perf_event_update_userpage(event);

    return 0;
}

static int adl_set_topdown_event_period(struct perf_event *event)
{
    struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);

    if (pmu->cpu_type != hybrid_big)
        return 0;

    return icl_set_topdown_event_period(event);
}

static inline u64 icl_get_metrics_event_value(u64 metric, u64 slots, int idx)
{
    u32 val;

    /*
     * The metric is reported as an 8bit integer fraction
     * summing up to 0xff.
     * slots-in-metric = (Metric / 0xff) * slots
     */
    val = (metric >> ((idx - INTEL_PMC_IDX_METRIC_BASE) * 8)) & 0xff;
    return mul_u64_u32_div(slots, val, 0xff);
}

static u64 icl_get_topdown_value(struct perf_event *event,
                                 u64 slots, u64 metrics)
{
    int idx = event->hw.idx;
    u64 delta;

    if (is_metric_idx(idx))
        delta = icl_get_metrics_event_value(metrics, slots, idx);
    else
        delta = slots;

    return delta;
}

static void __icl_update_topdown_event(struct perf_event *event,
                                       u64 slots, u64 metrics,
                                       u64 last_slots, u64 last_metrics)
{
    u64 delta, last = 0;

    delta = icl_get_topdown_value(event, slots, metrics);
    if (last_slots)
        last = icl_get_topdown_value(event, last_slots, last_metrics);

    /*
     * The 8bit integer fraction of a metric may not be accurate,
     * especially when the change is very small. For example, if only
     * a few bad_spec events happen, the fraction may be reduced from
     * 1 to 0, making the new value smaller than the last one. Skip
     * the update for such a case.
     */
    if (delta > last) {
        delta -= last;
        local64_add(delta, &event->count);
    }
}

static void update_saved_topdown_regs(struct perf_event *event, u64 slots,
                                      u64 metrics, int metric_end)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
    struct perf_event *other;
    int idx;

    event->hw.saved_slots = slots;
    event->hw.saved_metric = metrics;

    for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
        if (!is_topdown_idx(idx))
            continue;
        other = cpuc->events[idx];
        other->hw.saved_slots = slots;
        other->hw.saved_metric = metrics;
    }
}

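/*
 * Update all active Topdown events.
 *
 * The SLOTS value and the PERF_METRICS fractions are read once and then
 * applied to every active topdown event, since there is no independent
 * per-event overflow or update for the metric events.
 */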
static u64 intel_update_topdown_event(struct perf_event *event, int metric_end)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
    struct perf_event *other;
    u64 slots, metrics;
    bool reset = true;
    int idx;

    /* read Fixed counter 3 */
    rdpmcl((3 | INTEL_PMC_FIXED_RDPMC_BASE), slots);
    if (!slots)
        return 0;

    /* read PERF_METRICS */
    rdpmcl(INTEL_PMC_FIXED_RDPMC_METRICS, metrics);

    for_each_set_bit(idx, cpuc->active_mask, metric_end + 1) {
        if (!is_topdown_idx(idx))
            continue;
        other = cpuc->events[idx];
        __icl_update_topdown_event(other, slots, metrics,
                                   event ? event->hw.saved_slots : 0,
                                   event ? event->hw.saved_metric : 0);
    }

    /*
     * Check and update this event, which may have been cleared
     * in active_mask, e.g. by x86_pmu_stop().
     */
    if (event && !test_bit(event->hw.idx, cpuc->active_mask)) {
        __icl_update_topdown_event(event, slots, metrics,
                                   event->hw.saved_slots,
                                   event->hw.saved_metric);

        /*
         * In x86_pmu_stop() the event is cleared in active_mask first,
         * then the delta is drained, which indicates a context switch
         * for counting. Save the last values for both the slots and
         * the metric events instead of resetting the registers.
         */
        update_saved_topdown_regs(event, slots, metrics, metric_end);
        reset = false;
    }

    if (reset) {
        /* The fixed counter 3 has to be written before PERF_METRICS. */
        wrmsrl(MSR_CORE_PERF_FIXED_CTR3, 0);
        wrmsrl(MSR_PERF_METRICS, 0);
        if (event)
            update_saved_topdown_regs(event, 0, 0, metric_end);
    }

    return slots;
}

static u64 icl_update_topdown_event(struct perf_event *event)
{
    return intel_update_topdown_event(event, INTEL_PMC_IDX_METRIC_BASE +
                                             x86_pmu.num_topdown_events - 1);
}

static u64 adl_update_topdown_event(struct perf_event *event)
{
    struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);

    if (pmu->cpu_type != hybrid_big)
        return 0;

    return icl_update_topdown_event(event);
}

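/*
 * A read of any member of a topdown group updates the whole group at once,
 * so for a transactional group read only the SLOTS event needs to trigger
 * the update.
 */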
static void intel_pmu_read_topdown_event(struct perf_event *event)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

    /* Only need to call update_topdown_event() once for a group read. */
    if ((cpuc->txn_flags & PERF_PMU_TXN_READ) &&
        !is_slots_event(event))
        return;

    perf_pmu_disable(event->pmu);
    x86_pmu.update_topdown_event(event);
    perf_pmu_enable(event->pmu);
}

static void intel_pmu_read_event(struct perf_event *event)
{
    if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
        intel_pmu_auto_reload_read(event);
    else if (is_topdown_count(event) && x86_pmu.update_topdown_event)
        intel_pmu_read_topdown_event(event);
    else
        x86_perf_event_update(event);
}

static void intel_pmu_enable_fixed(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    u64 ctrl_val, mask, bits = 0;
    int idx = hwc->idx;

    if (is_topdown_idx(idx)) {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

        /*
         * When there are other active TopDown events,
         * don't enable the SLOTS counter again.
         */
        if (*(u64 *)cpuc->active_mask & INTEL_PMC_OTHER_TOPDOWN_BITS(idx))
            return;

        idx = INTEL_PMC_IDX_FIXED_SLOTS;
    }

    intel_set_masks(event, idx);

    /*
     * Enable IRQ generation (0x8), if not PEBS,
     * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
     * if requested:
     */
    if (!event->attr.precise_ip)
        bits |= 0x8;
    if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
        bits |= 0x2;
    if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
        bits |= 0x1;

    /*
     * ANY bit is supported in v3 and up
     */
    if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
        bits |= 0x4;

    idx -= INTEL_PMC_IDX_FIXED;
    bits <<= (idx * 4);
    mask = 0xfULL << (idx * 4);

    if (x86_pmu.intel_cap.pebs_baseline && event->attr.precise_ip) {
        bits |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
        mask |= ICL_FIXED_0_ADAPTIVE << (idx * 4);
    }

    rdmsrl(hwc->config_base, ctrl_val);
    ctrl_val &= ~mask;
    ctrl_val |= bits;
    wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    int idx = hwc->idx;

    if (unlikely(event->attr.precise_ip))
        intel_pmu_pebs_enable(event);

    switch (idx) {
    case 0 ... INTEL_PMC_IDX_FIXED - 1:
        intel_set_masks(event, idx);
        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
        break;
    case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1:
    case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END:
        intel_pmu_enable_fixed(event);
        break;
    case INTEL_PMC_IDX_FIXED_BTS:
        if (!__this_cpu_read(cpu_hw_events.enabled))
            return;
        intel_pmu_enable_bts(hwc->config);
        break;
    case INTEL_PMC_IDX_FIXED_VLBR:
        intel_set_masks(event, idx);
        break;
    default:
        pr_warn("Failed to enable the event with invalid index %d\n",
                idx);
    }
}

static void intel_pmu_add_event(struct perf_event *event)
{
    if (event->attr.precise_ip)
        intel_pmu_pebs_add(event);
    if (needs_branch_stack(event))
        intel_pmu_lbr_add(event);
}

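/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */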
int intel_pmu_save_and_restart(struct perf_event *event)
{
    x86_perf_event_update(event);
    /*
     * For a checkpointed counter always reset back to 0. This
     * avoids a situation where the counter overflows, aborts the
     * transaction and is then set back to shortly before the
     * overflow, and overflows and aborts again.
     */
    if (unlikely(event_is_checkpointed(event))) {
        /* No race with NMIs because the counter should not be armed */
        wrmsrl(event->hw.event_base, 0);
        local64_set(&event->hw.prev_count, 0);
    }
    return x86_perf_event_set_period(event);
}

static void intel_pmu_reset(void)
{
    struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
    int num_counters_fixed = hybrid(cpuc->pmu, num_counters_fixed);
    int num_counters = hybrid(cpuc->pmu, num_counters);
    unsigned long flags;
    int idx;

    if (!num_counters)
        return;

    local_irq_save(flags);

    pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());

    for (idx = 0; idx < num_counters; idx++) {
        wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
        wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
    }
    for (idx = 0; idx < num_counters_fixed; idx++) {
        if (fixed_counter_disabled(idx, cpuc->pmu))
            continue;
        wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
    }

    if (ds)
        ds->bts_index = ds->bts_buffer_base;

    /* Ack all overflows and disable fixed counters */
    if (x86_pmu.version >= 2) {
        intel_pmu_ack_status(intel_pmu_get_status());
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
    }

    /* Reset LBRs and LBR freezing */
    if (x86_pmu.lbr_nr) {
        update_debugctlmsr(get_debugctlmsr() &
            ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
    }

    local_irq_restore(flags);
}

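/*
 * Process the overflow bits reported in GLOBAL_STATUS: drain the PEBS
 * buffer, forward PT and perf-metrics overflows, and run the generic
 * overflow handling for every remaining counter bit.
 */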
static int handle_pmi_common(struct pt_regs *regs, u64 status)
{
    struct perf_sample_data data;
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
    int bit;
    int handled = 0;
    u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);

    inc_irq_stat(apic_perf_irqs);

    /*
     * Ignore a range of extra bits in status that do not indicate
     * overflow by themselves.
     */
    status &= ~(GLOBAL_STATUS_COND_CHG |
                GLOBAL_STATUS_ASIF |
                GLOBAL_STATUS_LBRS_FROZEN);
    if (!status)
        return 0;

    /*
     * PEBS counters may carry stale status bits: when several PEBS
     * events are active, or when a non-PEBS event overflows while PEBS
     * is armed, counter bits can be set in GLOBAL_STATUS even though
     * the records must only be processed via drain_pebs(). Handling
     * them in the regular sampling loop below would generate phony
     * samples not marked with the EXACT tag. Systematically mask out
     * the PEBS-enabled counters here so that PEBS events are only ever
     * processed via drain_pebs().
     */
    if (x86_pmu.flags & PMU_FL_PEBS_ALL)
        status &= ~cpuc->pebs_enabled;
    else
        status &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);

    /*
     * PEBS overflow sets bit 62 in the global status register
     */
    if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&status)) {
        u64 pebs_enabled = cpuc->pebs_enabled;

        handled++;
        x86_pmu.drain_pebs(regs, &data);
        status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;

        /*
         * PMI throttle may be triggered, which stops the PEBS event.
         * Although cpuc->pebs_enabled is updated accordingly,
         * MSR_IA32_PEBS_ENABLE is not, because cpuc->enabled has been
         * forced to 0 in the PMI.
         * Update the MSR if pebs_enabled has changed.
         */
        if (pebs_enabled != cpuc->pebs_enabled)
            wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
    }

    /*
     * Intel PT
     */
    if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
        handled++;
        if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
                     perf_guest_cbs->handle_intel_pt_intr))
            perf_guest_cbs->handle_intel_pt_intr();
        else
            intel_pt_interrupt();
    }

    /*
     * Intel Perf metrics
     */
    if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
        handled++;
        if (x86_pmu.update_topdown_event)
            x86_pmu.update_topdown_event(NULL);
    }

    /*
     * Checkpointed counters can lead to 'spurious' PMIs because the
     * rollback caused by the PMI will have cleared the overflow status
     * bit. Therefore always force probe these counters.
     */
    status |= cpuc->intel_cp_status;

    for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
        struct perf_event *event = cpuc->events[bit];

        handled++;

        if (!test_bit(bit, cpuc->active_mask))
            continue;

        if (!intel_pmu_save_and_restart(event))
            continue;

        perf_sample_data_init(&data, 0, event->hw.last_period);

        if (has_branch_stack(event))
            data.br_stack = &cpuc->lbr_stack;

        if (perf_event_overflow(event, &data, regs))
            x86_pmu_stop(event, 0);
    }

    return handled;
}
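/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */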
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
    bool late_ack = hybrid_bit(cpuc->pmu, late_ack);
    bool mid_ack = hybrid_bit(cpuc->pmu, mid_ack);
    int loops;
    u64 status;
    int handled;
    int pmu_enabled;

    /*
     * Save the PMU state.
     * It needs to be restored when leaving the handler.
     */
    pmu_enabled = cpuc->enabled;
    /*
     * In general, the early ACK is only applied for old platforms.
     * For the big core starting from Haswell, the late ACK should be
     * applied. For the small core after Tremont, the ACK has to be
     * done right before re-enabling the counters, in the middle of
     * this handler.
     */
    if (!late_ack && !mid_ack)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
    intel_bts_disable_local();
    cpuc->enabled = 0;
    __intel_pmu_disable_all();
    handled = intel_pmu_drain_bts_buffer();
    handled += intel_bts_interrupt();
    status = intel_pmu_get_status();
    if (!status)
        goto done;

    loops = 0;
again:
    intel_pmu_lbr_read();
    intel_pmu_ack_status(status);
    if (++loops > 100) {
        static bool warned;

        if (!warned) {
            WARN(1, "perfevents: irq loop stuck!\n");
            perf_event_print_debug();
            warned = true;
        }
        intel_pmu_reset();
        goto done;
    }

    handled += handle_pmi_common(regs, status);

    /*
     * Repeat if there is more work to be done:
     */
    status = intel_pmu_get_status();
    if (status)
        goto again;

done:
    if (mid_ack)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
    /* Only restore the PMU state when it was active. */
    cpuc->enabled = pmu_enabled;
    if (pmu_enabled)
        __intel_pmu_enable_all(0, true);
    intel_bts_enable_local();

    /*
     * Only unmask the NMI after the overflow counters
     * have been reset. This avoids spurious NMIs on
     * Haswell CPUs.
     */
    if (late_ack)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
    return handled;
}

static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
    if (unlikely(intel_pmu_has_bts(event)))
        return &bts_constraint;

    return NULL;
}

/*
 * Note: matches a fake event, like Fixed2.
 */
static struct event_constraint *
intel_vlbr_constraints(struct perf_event *event)
{
    struct event_constraint *c = &vlbr_constraint;

    if (unlikely(constraint_match(c, event->hw.config)))
        return c;

    return NULL;
}

static int intel_alt_er(struct cpu_hw_events *cpuc,
                        int idx, u64 config)
{
    struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
    int alt_idx = idx;

    if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
        return idx;

    if (idx == EXTRA_REG_RSP_0)
        alt_idx = EXTRA_REG_RSP_1;

    if (idx == EXTRA_REG_RSP_1)
        alt_idx = EXTRA_REG_RSP_0;

    if (config & ~extra_regs[alt_idx].valid_mask)
        return idx;

    return alt_idx;
}

static void intel_fixup_er(struct perf_event *event, int idx)
{
    struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
    event->hw.extra_reg.idx = idx;

    if (idx == EXTRA_REG_RSP_0) {
        event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
        event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
        event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
    } else if (idx == EXTRA_REG_RSP_1) {
        event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
        event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
        event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
    }
}

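/*
 * Manage allocation of the shared extra MSRs for certain events.
 *
 * Sharing can be:
 * per-cpu: to be shared between the various events on a single PMU
 * per-core: per-cpu + shared by HT threads
 */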
static struct event_constraint *
__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
                                   struct perf_event *event,
                                   struct hw_perf_event_extra *reg)
{
    struct event_constraint *c = &emptyconstraint;
    struct er_account *era;
    unsigned long flags;
    int idx = reg->idx;

    /*
     * reg->alloc can be set due to existing state, so for a fake cpuc
     * we need to ignore this, otherwise we might fail to allocate
     * proper fake state for this extra reg constraint.
     */
    if (reg->alloc && !cpuc->is_fake)
        return NULL; /* call x86_get_event_constraints() */

again:
    era = &cpuc->shared_regs->regs[idx];
    /*
     * we use spin_lock_irqsave() to avoid lockdep issues when
     * passing a fake cpuc
     */
    raw_spin_lock_irqsave(&era->lock, flags);

    if (!atomic_read(&era->ref) || era->config == reg->config) {

        /*
         * If it's a fake cpuc -- as per validate_{group,event}() we
         * shouldn't touch event state and we can avoid doing so
         * since both will only call get_event_constraints() once
         * on each event; this avoids the need for reg->alloc.
         */
        if (!cpuc->is_fake) {
            if (idx != reg->idx)
                intel_fixup_er(event, idx);

            /*
             * x86_schedule_events() can call get_event_constraints()
             * multiple times on events in the case of incremental
             * scheduling. reg->alloc ensures we only do the ER
             * allocation once.
             */
            reg->alloc = 1;
        }

        /* lock in msr value */
        era->config = reg->config;
        era->reg = reg->reg;

        /* one more user */
        atomic_inc(&era->ref);

        /*
         * need to call x86_get_event_constraints()
         * to check if the associated event has constraints
         */
        c = NULL;
    } else {
        idx = intel_alt_er(cpuc, idx, reg->config);
        if (idx != reg->idx) {
            raw_spin_unlock_irqrestore(&era->lock, flags);
            goto again;
        }
    }
    raw_spin_unlock_irqrestore(&era->lock, flags);

    return c;
}

static void
__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
                                   struct hw_perf_event_extra *reg)
{
    struct er_account *era;

    /*
     * Only put the constraint if the extra reg was actually allocated.
     * This also takes care of events which do not use an extra shared
     * reg.
     *
     * Also, if this is a fake cpuc we shouldn't touch any event state
     * (reg->alloc) and we don't care about leaving inconsistent cpuc
     * state either, since it'll be thrown out.
     */
    if (!reg->alloc || cpuc->is_fake)
        return;

    era = &cpuc->shared_regs->regs[reg->idx];

    /* one fewer user */
    atomic_dec(&era->ref);

    /* allocate again next time */
    reg->alloc = 0;
}

static struct event_constraint *
intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
                              struct perf_event *event)
{
    struct event_constraint *c = NULL, *d;
    struct hw_perf_event_extra *xreg, *breg;

    xreg = &event->hw.extra_reg;
    if (xreg->idx != EXTRA_REG_NONE) {
        c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
        if (c == &emptyconstraint)
            return c;
    }
    breg = &event->hw.branch_reg;
    if (breg->idx != EXTRA_REG_NONE) {
        d = __intel_shared_reg_get_constraints(cpuc, event, breg);
        if (d == &emptyconstraint) {
            __intel_shared_reg_put_constraints(cpuc, xreg);
            c = d;
        }
    }
    return c;
}

struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
{
    struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints);
    struct event_constraint *c;

    if (event_constraints) {
        for_each_event_constraint(c, event_constraints) {
            if (constraint_match(c, event->hw.config)) {
                event->hw.flags |= c->flags;
                return c;
            }
        }
    }

    return &hybrid_var(cpuc->pmu, unconstrained);
}

static struct event_constraint *
__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                              struct perf_event *event)
{
    struct event_constraint *c;

    c = intel_vlbr_constraints(event);
    if (c)
        return c;

    c = intel_bts_constraints(event);
    if (c)
        return c;

    c = intel_shared_regs_constraints(cpuc, event);
    if (c)
        return c;

    c = intel_pebs_constraints(event);
    if (c)
        return c;

    return x86_get_event_constraints(cpuc, idx, event);
}

static void
intel_start_scheduling(struct cpu_hw_events *cpuc)
{
    struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
    struct intel_excl_states *xl;
    int tid = cpuc->excl_thread_id;

    /*
     * nothing needed if in group validation mode
     */
    if (cpuc->is_fake || !is_ht_workaround_enabled())
        return;

    /*
     * no exclusion needed
     */
    if (WARN_ON_ONCE(!excl_cntrs))
        return;

    xl = &excl_cntrs->states[tid];

    xl->sched_started = true;
    /*
     * lock shared state until we are done scheduling
     * in intel_stop_scheduling();
     * makes scheduling appear as a transaction
     */
    raw_spin_lock(&excl_cntrs->lock);
}

static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
{
    struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
    struct event_constraint *c = cpuc->event_constraint[idx];
    struct intel_excl_states *xl;
    int tid = cpuc->excl_thread_id;

    if (cpuc->is_fake || !is_ht_workaround_enabled())
        return;

    if (WARN_ON_ONCE(!excl_cntrs))
        return;

    if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
        return;

    xl = &excl_cntrs->states[tid];

    lockdep_assert_held(&excl_cntrs->lock);

    if (c->flags & PERF_X86_EVENT_EXCL)
        xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
    else
        xl->state[cntr] = INTEL_EXCL_SHARED;
}

static void
intel_stop_scheduling(struct cpu_hw_events *cpuc)
{
    struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
    struct intel_excl_states *xl;
    int tid = cpuc->excl_thread_id;

    /*
     * nothing needed if in group validation mode
     */
    if (cpuc->is_fake || !is_ht_workaround_enabled())
        return;

    /*
     * no exclusion needed
     */
    if (WARN_ON_ONCE(!excl_cntrs))
        return;

    xl = &excl_cntrs->states[tid];

    xl->sched_started = false;
    /*
     * release shared state
     */
    raw_spin_unlock(&excl_cntrs->lock);
}

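/*
 * Turn a static constraint into a dynamic one by copying it into the
 * per-cpu constraint_list slot for this event, so its counter mask can be
 * modified without touching the static const tables.
 */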
static struct event_constraint *
dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
{
    WARN_ON_ONCE(!cpuc->constraint_list);

    if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
        struct event_constraint *cx;

        /*
         * grab pre-allocated constraint entry
         */
        cx = &cpuc->constraint_list[idx];

        /*
         * initialize dynamic constraint
         * with static constraint
         */
        *cx = *c;

        /*
         * mark constraint as dynamic
         */
        cx->flags |= PERF_X86_EVENT_DYNAMIC;
        c = cx;
    }

    return c;
}

static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
                           int idx, struct event_constraint *c)
{
    struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
    struct intel_excl_states *xlo;
    int tid = cpuc->excl_thread_id;
    int is_excl, i, w;

    /*
     * validating a group does not require
     * enforcing cross-thread exclusion
     */
    if (cpuc->is_fake || !is_ht_workaround_enabled())
        return c;

    /*
     * no exclusion needed
     */
    if (WARN_ON_ONCE(!excl_cntrs))
        return c;

    /*
     * because we modify the constraint, we need
     * to make a copy. Static constraints come
     * from static const tables.
     *
     * only needed when the constraint has not yet
     * been cloned (marked dynamic)
     */
    c = dyn_constraint(cpuc, c, idx);

    /*
     * From here on, the constraint is dynamic.
     * Either it was just allocated above, or it
     * was allocated during an earlier invocation
     * of this function
     */

    /*
     * state of sibling HT
     */
    xlo = &excl_cntrs->states[tid ^ 1];

    /*
     * event requires exclusive counter access
     * across HT threads
     */
    is_excl = c->flags & PERF_X86_EVENT_EXCL;
    if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
        event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
        if (!cpuc->n_excl++)
            WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
    }

    /*
     * Modify static constraint with current dynamic
     * state of the sibling thread:
     *
     * EXCLUSIVE: sibling counter measuring exclusive event
     * SHARED   : sibling counter measuring non-exclusive event
     * UNUSED   : sibling counter unused
     */
    w = c->weight;
    for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
        /*
         * exclusive event in sibling counter:
         * our corresponding counter cannot be used
         * regardless of our event
         */
        if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE) {
            __clear_bit(i, c->idxmsk);
            w--;
            continue;
        }
        /*
         * if measuring an exclusive event while the sibling is
         * measuring a non-exclusive one, the counter cannot
         * be used
         */
        if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED) {
            __clear_bit(i, c->idxmsk);
            w--;
            continue;
        }
    }

    /*
     * If no counter is left, return the empty constraint so broken
     * events end up failing scheduling instead of warning later.
     */
    if (!w)
        c = &emptyconstraint;

    c->weight = w;

    return c;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                            struct perf_event *event)
{
    struct event_constraint *c1, *c2;

    c1 = cpuc->event_constraint[idx];

    /*
     * first time only:
     * - static constraint: no change across incremental scheduling calls
     * - dynamic constraint: handled by intel_get_excl_constraints()
     */
    c2 = __intel_get_event_constraints(cpuc, idx, event);
    if (c1) {
        WARN_ON_ONCE(!(c1->flags & PERF_X86_EVENT_DYNAMIC));
        bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
        c1->weight = c2->weight;
        c2 = c1;
    }

    if (cpuc->excl_cntrs)
        return intel_get_excl_constraints(cpuc, event, idx, c2);

    return c2;
}

static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
                                       struct perf_event *event)
{
    struct hw_perf_event *hwc = &event->hw;
    struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
    int tid = cpuc->excl_thread_id;
    struct intel_excl_states *xl;

    /*
     * nothing needed if in group validation mode
     */
    if (cpuc->is_fake)
        return;

    if (WARN_ON_ONCE(!excl_cntrs))
        return;

    if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
        hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
        if (!--cpuc->n_excl)
            WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
    }

    /*
     * If the event was actually assigned, mark the counter state as
     * unused now.
     */
    if (hwc->idx >= 0) {
        xl = &excl_cntrs->states[tid];

        /*
         * put_constraint may be called from x86_schedule_events()
         * which already has the lock held, so here make the locking
         * conditional.
         */
        if (!xl->sched_started)
            raw_spin_lock(&excl_cntrs->lock);

        xl->state[hwc->idx] = INTEL_EXCL_UNUSED;

        if (!xl->sched_started)
            raw_spin_unlock(&excl_cntrs->lock);
    }
}

static void
intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
{
    struct hw_perf_event_extra *reg;

    reg = &event->hw.extra_reg;
    if (reg->idx != EXTRA_REG_NONE)
        __intel_shared_reg_put_constraints(cpuc, reg);

    reg = &event->hw.branch_reg;
    if (reg->idx != EXTRA_REG_NONE)
        __intel_shared_reg_put_constraints(cpuc, reg);
}

static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
{
    intel_put_shared_regs_event_constraints(cpuc, event);

    /*
     * If the PMU has exclusive counter restrictions, then
     * all events are subject to them and must call the
     * put_excl_constraints() routine.
     */
    if (cpuc->excl_cntrs)
        intel_put_excl_constraints(cpuc, event);
}

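/*
 * The cycles event (0x003c) is not PEBS-capable, so the aliases below
 * rewrite it to a PEBS-capable encoding that counts the same thing:
 * a retirement event with an inverted cmask larger than the per-cycle
 * retirement limit fires on every cycle.
 */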
static void intel_pebs_aliases_core2(struct perf_event *event)
{
    if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
        /*
         * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
         * (0x003c) so it can be used with PEBS.
         *
         * The regular CPU_CLK_UNHALTED.THREAD_P event isn't PEBS
         * capable. However, INST_RETIRED.ANY_P (0x00c0) with .inv=1
         * and .cmask=16 counts the cycles during which fewer than 16
         * instructions retire, which is every cycle, yielding a
         * PEBS-capable cycle counter.
         */
        u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);

        alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
        event->hw.config = alt_config;
    }
}

static void intel_pebs_aliases_snb(struct perf_event *event)
{
    if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
        /*
         * Same idea as on Core2, but Sandy Bridge uses
         * UOPS_RETIRED.ALL (0x01c2) with .inv=1 and .cmask=16 as the
         * PEBS-capable cycle proxy.
         */
        u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);

        alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
        event->hw.config = alt_config;
    }
}

static void intel_pebs_aliases_precdist(struct perf_event *event)
{
    if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
        /*
         * For precise_ip = 3 use INST_RETIRED.PREC_DIST (0x01c0)
         * with .inv=1 and .cmask=16 instead, which supports precise
         * distribution of samples.
         */
        u64 alt_config = X86_CONFIG(.event=0xc0, .umask=0x01, .inv=1, .cmask=16);

        alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
        event->hw.config = alt_config;
    }
}

static void intel_pebs_aliases_ivb(struct perf_event *event)
{
    if (event->attr.precise_ip < 3)
        return intel_pebs_aliases_snb(event);
    return intel_pebs_aliases_precdist(event);
}

static void intel_pebs_aliases_skl(struct perf_event *event)
{
    if (event->attr.precise_ip < 3)
        return intel_pebs_aliases_core2(event);
    return intel_pebs_aliases_precdist(event);
}

static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event)
{
    unsigned long flags = x86_pmu.large_pebs_flags;

    if (event->attr.use_clockid)
        flags &= ~PERF_SAMPLE_TIME;
    if (!event->attr.exclude_kernel)
        flags &= ~PERF_SAMPLE_REGS_USER;
    if (event->attr.sample_regs_user & ~PEBS_GP_REGS)
        flags &= ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR);
    return flags;
}

static int intel_pmu_bts_config(struct perf_event *event)
{
    struct perf_event_attr *attr = &event->attr;

    if (unlikely(intel_pmu_has_bts(event))) {
        /* BTS is not supported by this architecture. */
        if (!x86_pmu.bts_active)
            return -EOPNOTSUPP;

        /* BTS is currently only allowed for user-mode. */
        if (!attr->exclude_kernel)
            return -EOPNOTSUPP;

        /* BTS is not allowed for precise events. */
        if (attr->precise_ip)
            return -EOPNOTSUPP;

        /* disallow bts if conflicting events are present */
        if (x86_add_exclusive(x86_lbr_exclusive_lbr))
            return -EBUSY;

        event->destroy = hw_perf_lbr_event_destroy;
    }

    return 0;
}

static int core_pmu_hw_config(struct perf_event *event)
{
    int ret = x86_pmu_hw_config(event);

    if (ret)
        return ret;

    return intel_pmu_bts_config(event);
}

#define INTEL_TD_METRIC_AVAILABLE_MAX (INTEL_TD_METRIC_RETIRING + \
                                       ((x86_pmu.num_topdown_events - 1) << 8))

static bool is_available_metric_event(struct perf_event *event)
{
    return is_metric_event(event) &&
           event->attr.config <= INTEL_TD_METRIC_AVAILABLE_MAX;
}

static inline bool is_mem_loads_event(struct perf_event *event)
{
    return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xcd, .umask=0x01);
}

static inline bool is_mem_loads_aux_event(struct perf_event *event)
{
    return (event->attr.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0x03, .umask=0x82);
}

static inline bool require_mem_loads_aux_event(struct perf_event *event)
{
    if (!(x86_pmu.flags & PMU_FL_MEM_LOADS_AUX))
        return false;

    if (is_hybrid())
        return hybrid_pmu(event->pmu)->cpu_type == hybrid_big;

    return true;
}

static inline bool intel_pmu_has_cap(struct perf_event *event, int idx)
{
    union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap);

    return test_bit(idx, (unsigned long *)&intel_cap->capabilities);
}

static int intel_pmu_hw_config(struct perf_event *event)
{
    int ret = x86_pmu_hw_config(event);

    if (ret)
        return ret;

    ret = intel_pmu_bts_config(event);
    if (ret)
        return ret;

    if (event->attr.precise_ip) {
        if ((event->attr.config & INTEL_ARCH_EVENT_MASK) == INTEL_FIXED_VLBR_EVENT)
            return -EINVAL;

        if (!(event->attr.freq || (event->attr.wakeup_events && !event->attr.watermark))) {
            event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
            if (!(event->attr.sample_type &
                  ~intel_pmu_large_pebs_flags(event))) {
                event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS;
                event->attach_state |= PERF_ATTACH_SCHED_CB;
            }
        }
        if (x86_pmu.pebs_aliases)
            x86_pmu.pebs_aliases(event);

        if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
            event->attr.sample_type |= __PERF_SAMPLE_CALLCHAIN_EARLY;
    }

    if (needs_branch_stack(event)) {
        ret = intel_pmu_setup_lbr_filter(event);
        if (ret)
            return ret;
        event->attach_state |= PERF_ATTACH_SCHED_CB;

        /*
         * BTS is set up earlier in this path, so don't account twice
         */
        if (!unlikely(intel_pmu_has_bts(event))) {
            if (x86_add_exclusive(x86_lbr_exclusive_lbr))
                return -EBUSY;

            event->destroy = hw_perf_lbr_event_destroy;
        }
    }

    if (event->attr.aux_output) {
        if (!event->attr.precise_ip)
            return -EINVAL;

        event->hw.flags |= PERF_X86_EVENT_PEBS_VIA_PT;
    }

    if ((event->attr.type == PERF_TYPE_HARDWARE) ||
        (event->attr.type == PERF_TYPE_HW_CACHE))
        return 0;

    /*
     * Config Topdown slots and metric events
     *
     * The slots event on Fixed Counter 3 can support sampling,
     * which will be handled normally in x86_perf_event_update().
     *
     * Metric events don't support sampling and require being paired
     * with a slots event as group leader. When the slots event
     * is used in a metrics group, it too cannot support sampling.
     */
    if (intel_pmu_has_cap(event, PERF_CAP_METRICS_IDX) && is_topdown_event(event)) {
        if (event->attr.config1 || event->attr.config2)
            return -EINVAL;

        /*
         * The TopDown metrics events and slots event don't
         * support any filters.
         */
        if (event->attr.config & X86_ALL_EVENT_FLAGS)
            return -EINVAL;

        if (is_available_metric_event(event)) {
            struct perf_event *leader = event->group_leader;

            /* The metric events don't support sampling. */
            if (is_sampling_event(event))
                return -EINVAL;

            /* The metric events require a slots group leader. */
            if (!is_slots_event(leader))
                return -EINVAL;

            /*
             * The slots event cannot be used as a sampling event
             * when it is paired with metric events, because the
             * SLOTS hardware counter underlies the metrics.
             */
            if (is_sampling_event(leader))
                return -EINVAL;

            event->event_caps |= PERF_EV_CAP_SIBLING;
            /*
             * Only once we have a METRICs sibling do we
             * need TopDown magic.
             */
            leader->hw.flags |= PERF_X86_EVENT_TOPDOWN;
            event->hw.flags |= PERF_X86_EVENT_TOPDOWN;
        }
    }

    /*
     * On platforms with PMU_FL_MEM_LOADS_AUX, a mem-loads event that
     * samples PERF_SAMPLE_DATA_SRC is only valid when paired with the
     * auxiliary event (0x03, umask 0x82) in the same group; reject the
     * event if no such leader or sibling exists.
     */
    if (require_mem_loads_aux_event(event) &&
        (event->attr.sample_type & PERF_SAMPLE_DATA_SRC) &&
        is_mem_loads_event(event)) {
        struct perf_event *leader = event->group_leader;
        struct perf_event *sibling = NULL;

        if (!is_mem_loads_aux_event(leader)) {
            for_each_sibling_event(sibling, leader) {
                if (is_mem_loads_aux_event(sibling))
                    break;
            }
            if (list_entry_is_head(sibling, &leader->sibling_list, sibling_list))
                return -ENODATA;
        }
    }

    if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
        return 0;

    if (x86_pmu.version < 3)
        return -EINVAL;

    ret = perf_allow_cpu(&event->attr);
    if (ret)
        return ret;

    event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

    return 0;
}

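/*
 * Build the MSR switch list used by KVM when entering/leaving a guest:
 * GLOBAL_CTRL is programmed so that only the events allowed on each side
 * (exclude_host/exclude_guest) are enabled there.
 */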
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
    struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
    u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);

    arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
    arr[0].host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
    arr[0].guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask;
    if (x86_pmu.flags & PMU_FL_PEBS_ALL)
        arr[0].guest &= ~cpuc->pebs_enabled;
    else
        arr[0].guest &= ~(cpuc->pebs_enabled & PEBS_COUNTER_MASK);
    *nr = 1;

    if (x86_pmu.pebs && x86_pmu.pebs_no_isolation) {
        /*
         * If a PMU counter has PEBS enabled it is not enough to
         * disable the counter on a guest entry, since a PEBS memory
         * write can overshoot guest entry and corrupt guest
         * memory. Disabling PEBS solves the problem.
         *
         * Don't do this if the CPU already enforces it.
         */
        arr[1].msr = MSR_IA32_PEBS_ENABLE;
        arr[1].host = cpuc->pebs_enabled;
        arr[1].guest = 0;
        *nr = 2;
    }

    return arr;
}

static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
    struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
    int idx;

    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
        struct perf_event *event = cpuc->events[idx];

        arr[idx].msr = x86_pmu_config_addr(idx);
        arr[idx].host = arr[idx].guest = 0;

        if (!test_bit(idx, cpuc->active_mask))
            continue;

        arr[idx].host = arr[idx].guest =
            event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;

        if (event->attr.exclude_host)
            arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
        else if (event->attr.exclude_guest)
            arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
    }

    *nr = x86_pmu.num_counters;
    return arr;
}

static void core_pmu_enable_event(struct perf_event *event)
{
    if (!event->attr.exclude_host)
        x86_pmu_enable_event(event);
}

static void core_pmu_enable_all(int added)
{
    struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
    int idx;

    for (idx = 0; idx < x86_pmu.num_counters; idx++) {
        struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

        if (!test_bit(idx, cpuc->active_mask) ||
            cpuc->events[idx]->attr.exclude_host)
            continue;

        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
    }
}

static int hsw_hw_config(struct perf_event *event)
{
    int ret = intel_pmu_hw_config(event);

    if (ret)
        return ret;
    if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
        return 0;
    event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);

    /*
     * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
     * PEBS or in ANY thread mode. Since the results are non-sensical,
     * forbid this combination.
     */
    if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
        ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
         event->attr.precise_ip > 0))
        return -EOPNOTSUPP;

    if (event_is_checkpointed(event)) {
        /*
         * Sampling of checkpointed events can cause situations where
         * the CPU constantly aborts because of an overflow, which is
         * then checkpointed back and ignored. Forbid checkpointing
         * for sampling.
         *
         * But still allow a long sampling period, so that perf stat
         * from KVM works.
         */
        if (event->attr.sample_period > 0 &&
            event->attr.sample_period < 0x7fffffff)
            return -EOPNOTSUPP;
    }
    return 0;
}

static struct event_constraint counter0_constraint =
    INTEL_ALL_EVENT_CONSTRAINT(0, 0x1);

static struct event_constraint counter2_constraint =
    EVENT_CONSTRAINT(0, 0x4, 0);

static struct event_constraint fixed0_constraint =
    FIXED_EVENT_CONSTRAINT(0x00c0, 0);

static struct event_constraint fixed0_counter0_constraint =
    INTEL_ALL_EVENT_CONSTRAINT(0, 0x100000001ULL);

static struct event_constraint *
hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
{
    struct event_constraint *c;

    c = intel_get_event_constraints(cpuc, idx, event);

    /* Handle special quirk on in_tx_cp only in counter 2 */
    if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
        if (c->idxmsk64 & (1U << 2))
            return &counter2_constraint;
        return &emptyconstraint;
    }

    return c;
}

static struct event_constraint *
icl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
{
    /*
     * Fixed counter 0 has less skid.
     * Force instruction:ppp in Fixed counter 0
     */
    if ((event->attr.precise_ip == 3) &&
        constraint_match(&fixed0_constraint, event->hw.config))
        return &fixed0_constraint;

    return hsw_get_event_constraints(cpuc, idx, event);
}

static struct event_constraint *
spr_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
{
    struct event_constraint *c;

    c = icl_get_event_constraints(cpuc, idx, event);

    /*
     * :ppp indicates the Precise Distribution (PDist) facility, which
     * is only supported on GP counter 0. If a :ppp event is not
     * available on GP counter 0, error out.
     * Exception: instruction PDIR is only available on fixed counter 0.
     */
    if ((event->attr.precise_ip == 3) &&
        !constraint_match(&fixed0_constraint, event->hw.config)) {
        if (c->idxmsk64 & BIT_ULL(0))
            return &counter0_constraint;

        return &emptyconstraint;
    }

    return c;
}

static struct event_constraint *
glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
{
    struct event_constraint *c;

    /* :ppp means to do reduced skid PEBS which is PMC0 only. */
    if (event->attr.precise_ip == 3)
        return &counter0_constraint;

    c = intel_get_event_constraints(cpuc, idx, event);

    return c;
}

static struct event_constraint *
tnt_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
{
    struct event_constraint *c;

    /*
     * :ppp means to do reduced skid PEBS,
     * which is available on PMC0 and fixed counter 0.
     */
    if (event->attr.precise_ip == 3) {
        /* Force instruction:ppp on PMC0 and Fixed counter 0 */
        if (constraint_match(&fixed0_constraint, event->hw.config))
            return &fixed0_counter0_constraint;

        return &counter0_constraint;
    }

    c = intel_get_event_constraints(cpuc, idx, event);

    return c;
}

static bool allow_tsx_force_abort = true;

static struct event_constraint *
tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
{
    struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);

    /*
     * Without TFA we must not use PMC3.
     */
    if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
        c = dyn_constraint(cpuc, c, idx);
        c->idxmsk64 &= ~(1ULL << 3);
        c->weight--;
    }

    return c;
}

static struct event_constraint *
adl_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                          struct perf_event *event)
{
    struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);

    if (pmu->cpu_type == hybrid_big)
        return spr_get_event_constraints(cpuc, idx, event);
    else if (pmu->cpu_type == hybrid_small)
        return tnt_get_event_constraints(cpuc, idx, event);

    WARN_ON(1);
    return &emptyconstraint;
}

static int adl_hw_config(struct perf_event *event)
{
    struct x86_hybrid_pmu *pmu = hybrid_pmu(event->pmu);

    if (pmu->cpu_type == hybrid_big)
        return hsw_hw_config(event);
    else if (pmu->cpu_type == hybrid_small)
        return intel_pmu_hw_config(event);

    WARN_ON(1);
    return -EOPNOTSUPP;
}

static u8 adl_get_hybrid_cpu_type(void)
{
    return hybrid_big;
}

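/*
 * Broadwell:
 *
 * The INST_RETIRED.ALL period always needs to have the lowest 6 bits
 * cleared (BDM55) and must not use a period smaller than 100 (BDM11).
 * We combine the two to enforce a minimum period of 128 (the smallest
 * value that has bits 0-5 cleared and is >= 100).
 *
 * Because of how x86_perf_event_set_period() works, the truncation of
 * the lower 6 bits is 'harmless' as we'll occasionally add a longer
 * period and repeat the update once that period expires.
 */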
static u64 bdw_limit_period(struct perf_event *event, u64 left)
{
    if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
        X86_CONFIG(.event=0xc0, .umask=0x01)) {
        if (left < 128)
            left = 128;
        left &= ~0x3fULL;
    }
    return left;
}

static u64 nhm_limit_period(struct perf_event *event, u64 left)
{
    return max(left, 32ULL);
}

static u64 spr_limit_period(struct perf_event *event, u64 left)
{
    if (event->attr.precise_ip == 3)
        return max(left, 128ULL);

    return left;
}

PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(umask, "config:8-15");
PMU_FORMAT_ATTR(edge, "config:18");
PMU_FORMAT_ATTR(pc, "config:19");
PMU_FORMAT_ATTR(any, "config:21");
PMU_FORMAT_ATTR(inv, "config:23");
PMU_FORMAT_ATTR(cmask, "config:24-31");
PMU_FORMAT_ATTR(in_tx, "config:32");
PMU_FORMAT_ATTR(in_tx_cp, "config:33");

static struct attribute *intel_arch_formats_attr[] = {
    &format_attr_event.attr,
    &format_attr_umask.attr,
    &format_attr_edge.attr,
    &format_attr_pc.attr,
    &format_attr_inv.attr,
    &format_attr_cmask.attr,
    NULL,
};

ssize_t intel_event_sysfs_show(char *page, u64 config)
{
    u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);

    return x86_event_sysfs_show(page, config, event);
}

static struct intel_shared_regs *allocate_shared_regs(int cpu)
{
    struct intel_shared_regs *regs;
    int i;

    regs = kzalloc_node(sizeof(struct intel_shared_regs),
                        GFP_KERNEL, cpu_to_node(cpu));
    if (regs) {
        /*
         * initialize the locks to keep lockdep happy
         */
        for (i = 0; i < EXTRA_REG_MAX; i++)
            raw_spin_lock_init(&regs->regs[i].lock);

        regs->core_id = -1;
    }
    return regs;
}

static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
{
    struct intel_excl_cntrs *c;

    c = kzalloc_node(sizeof(struct intel_excl_cntrs),
                     GFP_KERNEL, cpu_to_node(cpu));
    if (c) {
        raw_spin_lock_init(&c->lock);
        c->core_id = -1;
    }
    return c;
}

int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
    cpuc->pebs_record_size = x86_pmu.pebs_record_size;

    if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
        cpuc->shared_regs = allocate_shared_regs(cpu);
        if (!cpuc->shared_regs)
            goto err;
    }

    if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
        size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);

        cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
        if (!cpuc->constraint_list)
            goto err_shared_regs;
    }

    if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
        cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
        if (!cpuc->excl_cntrs)
            goto err_constraint_list;

        cpuc->excl_thread_id = 0;
    }

    return 0;

err_constraint_list:
    kfree(cpuc->constraint_list);
    cpuc->constraint_list = NULL;

err_shared_regs:
    kfree(cpuc->shared_regs);
    cpuc->shared_regs = NULL;

err:
    return -ENOMEM;
}

static int intel_pmu_cpu_prepare(int cpu)
{
    return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
}

static void flip_smm_bit(void *data)
{
    unsigned long set = *(unsigned long *)data;

    if (set > 0) {
        msr_set_bit(MSR_IA32_DEBUGCTLMSR,
                    DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
    } else {
        msr_clear_bit(MSR_IA32_DEBUGCTLMSR,
                      DEBUGCTLMSR_FREEZE_IN_SMM_BIT);
    }
}

static bool init_hybrid_pmu(int cpu)
{
    struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
    u8 cpu_type = get_this_hybrid_cpu_type();
    struct x86_hybrid_pmu *pmu = NULL;
    int i;

    if (!cpu_type && x86_pmu.get_hybrid_cpu_type)
        cpu_type = x86_pmu.get_hybrid_cpu_type();

    for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
        if (x86_pmu.hybrid_pmu[i].cpu_type == cpu_type) {
            pmu = &x86_pmu.hybrid_pmu[i];
            break;
        }
    }
    if (WARN_ON_ONCE(!pmu || (pmu->pmu.type == -1))) {
        cpuc->pmu = NULL;
        return false;
    }

    /* Only check and dump the PMU information for the first CPU */
    if (!cpumask_empty(&pmu->supported_cpus))
        goto end;

    if (!check_hw_exists(&pmu->pmu, pmu->num_counters, pmu->num_counters_fixed))
        return false;

    pr_info("%s PMU driver: ", pmu->name);

    if (pmu->intel_cap.pebs_output_pt_available)
        pr_cont("PEBS-via-PT ");

    pr_cont("\n");

    x86_pmu_show_pmu_cap(pmu->num_counters, pmu->num_counters_fixed,
                         pmu->intel_ctrl);

end:
    cpumask_set_cpu(cpu, &pmu->supported_cpus);
    cpuc->pmu = &pmu->pmu;

    x86_pmu_update_cpu_context(&pmu->pmu, cpu);

    return true;
}

static void intel_pmu_cpu_starting(int cpu)
{
    struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
    int core_id = topology_core_id(cpu);
    int i;

    if (is_hybrid() && !init_hybrid_pmu(cpu))
        return;

    init_debug_store_on_cpu(cpu);
    /*
     * Deal with CPUs that don't clear their LBRs on power-up.
     */
    intel_pmu_lbr_reset();

    cpuc->lbr_sel = NULL;

    if (x86_pmu.flags & PMU_FL_TFA) {
        WARN_ON_ONCE(cpuc->tfa_shadow);
        cpuc->tfa_shadow = ~0ULL;
        intel_set_tfa(cpuc, false);
    }

    if (x86_pmu.version > 1)
        flip_smm_bit(&x86_pmu.attr_freeze_on_smi);

    /*
     * Disable perf metrics if any added CPU doesn't support it.
     *
     * Turn off the check for a hybrid architecture, because the
     * architecture MSR, MSR_IA32_PERF_CAPABILITIES, only indicates
     * the architecture features. Perf metrics is a model-specific
     * feature for now. The corresponding bit should always be 0 on
     * a hybrid platform, e.g. Alder Lake.
     */
    if (!is_hybrid() && x86_pmu.intel_cap.perf_metrics) {
        union perf_capabilities perf_cap;

        rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap.capabilities);
        if (!perf_cap.perf_metrics) {
            x86_pmu.intel_cap.perf_metrics = 0;
            x86_pmu.intel_ctrl &= ~(1ULL << GLOBAL_CTRL_EN_PERF_METRICS);
        }
    }

    if (!cpuc->shared_regs)
        return;

    if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
        for_each_cpu(i, topology_sibling_cpumask(cpu)) {
            struct intel_shared_regs *pc;

            pc = per_cpu(cpu_hw_events, i).shared_regs;
            if (pc && pc->core_id == core_id) {
                cpuc->kfree_on_online[0] = cpuc->shared_regs;
                cpuc->shared_regs = pc;
                break;
            }
        }
        cpuc->shared_regs->core_id = core_id;
        cpuc->shared_regs->refcnt++;
    }

    if (x86_pmu.lbr_sel_map)
        cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];

    if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
        for_each_cpu(i, topology_sibling_cpumask(cpu)) {
            struct cpu_hw_events *sibling;
            struct intel_excl_cntrs *c;

            sibling = &per_cpu(cpu_hw_events, i);
            c = sibling->excl_cntrs;
            if (c && c->core_id == core_id) {
                cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
                cpuc->excl_cntrs = c;
                if (!sibling->excl_thread_id)
                    cpuc->excl_thread_id = 1;
                break;
            }
        }
        cpuc->excl_cntrs->core_id = core_id;
        cpuc->excl_cntrs->refcnt++;
    }
}

static void free_excl_cntrs(struct cpu_hw_events *cpuc)
{
    struct intel_excl_cntrs *c;

    c = cpuc->excl_cntrs;
    if (c) {
        if (c->core_id == -1 || --c->refcnt == 0)
            kfree(c);
        cpuc->excl_cntrs = NULL;
    }

    kfree(cpuc->constraint_list);
    cpuc->constraint_list = NULL;
}

static void intel_pmu_cpu_dying(int cpu)
{
    fini_debug_store_on_cpu(cpu);
}

void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
    struct intel_shared_regs *pc;