// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * x86 SMP booting functions
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task_stack.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>
#include <linux/numa.h>
#include <linux/pgtable.h>
#include <linux/overflow.h>
#include <linux/syscore_ops.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/fpu/internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
#include <asm/i8259.h>
#include <asm/misc.h>
#include <asm/qspinlock.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/hw_irq.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_ACPI_CPPC_LIB
#include <acpi/cppc_acpi.h>
#endif

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* representing HT, core and die siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
EXPORT_PER_CPU_SYMBOL(cpu_die_map);

DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* Logical package and die management */
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;
static unsigned int logical_die __read_mostly;

/* Maximum number of SMT threads on any online CPU */
int __read_mostly __max_smt_threads = 1;

/* Flag to indicate if a complete sched domain rebuild is required */
bool x86_topology_update;

int arch_update_cpu_topology(void)
{
	int retval = x86_topology_update;

	x86_topology_update = false;
	return retval;
}

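/*
 * The legacy warm-reset path used to start an AP on old systems: CMOS
 * register 0xF is set to 0xA ("warm reset") and the real-mode trampoline
 * address is stored in the BIOS warm-reset vector at 40:67, so a CPU
 * released from INIT jumps straight to start_eip.
 */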
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0xa, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
							start_eip >> 4;
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
							start_eip & 0xf;
}

static inline void smpboot_restore_warm_reset_vector(void)
{
	unsigned long flags;

	/* Set warm reset code and vector back to default values. */
	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);

	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
}

static void init_freq_invariance(bool secondary, bool cppc_ready);

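/*
 * Report back to the Boot Processor during boot time or to the caller
 * processor during CPU online.
 */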
static void smp_callin(void)
{
	int cpuid;

	cpuid = smp_processor_id();

	/* Set up the local APIC of this (secondary) processor. */
	apic_ap_setup();

	/*
	 * Save our processor parameters. Note: this information
	 * may also be used by code in other secondary processors.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * The topology information must be up to date before
	 * calibrate_delay() and notify_cpu_starting().
	 */
	set_cpu_sibling_map(raw_smp_processor_id());

	init_freq_invariance(true, false);

	/*
	 * Get our bogomips. Update loops_per_jiffy in cpu_data:
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}

static int cpu0_logical_apicid;
static int enable_start_cpu0;

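/*
 * Activate a secondary processor.
 */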
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* except direct CPU state initialization
	 * above this point, because the environment is not fully set up
	 * until cpu_init_secondary() has run.
	 */
	cr4_init();

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif
	cpu_init_secondary();
	rcu_cpu_starting(raw_smp_processor_id());
	x86_cpuinit.early_percpu_clock_init();
	smp_callin();

	enable_start_cpu0 = 0;

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the boot CPU:
	 */
	check_tsc_sync_target();

	speculative_store_bypass_ht_init();

	/*
	 * Lock vector_lock, set CPU online and bring the vector
	 * allocator online. Online must be set with vector_lock held
	 * to prevent a concurrent irq setup/teardown from seeing a
	 * half valid vector space.
	 */
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	lapic_online();
	unlock_vector_lock();
	cpu_set_state_online(smp_processor_id());
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

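/**
 * topology_is_primary_thread - Check whether CPU is the primary SMT thread
 * @cpu:	CPU to check
 */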
bool topology_is_primary_thread(unsigned int cpu)
{
	return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
}

/**
 * topology_smt_supported - Check whether SMT is supported by the CPUs
 */
bool topology_smt_supported(void)
{
	return smp_num_siblings > 1;
}

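/**
 * topology_phys_to_logical_pkg - Map a physical package id to a logical one
 * @phys_pkg:	The physical package id to map
 *
 * Returns logical package id or -1 if not found
 */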
int topology_phys_to_logical_pkg(unsigned int phys_pkg)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && c->phys_proc_id == phys_pkg)
			return c->logical_proc_id;
	}
	return -1;
}
EXPORT_SYMBOL(topology_phys_to_logical_pkg);

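/**
 * topology_phys_to_logical_die - Map a physical die id to a logical one
 * @die_id:	The physical die id to map
 * @cur_cpu:	The CPU for which the mapping is done
 *
 * Returns logical die id or -1 if not found
 */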
int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu)
{
	int cpu;
	int proc_id = cpu_data(cur_cpu).phys_proc_id;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_x86 *c = &cpu_data(cpu);

		if (c->initialized && c->cpu_die_id == die_id &&
		    c->phys_proc_id == proc_id)
			return c->logical_die_id;
	}
	return -1;
}
EXPORT_SYMBOL(topology_phys_to_logical_die);

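/**
 * topology_update_package_map - Update the physical to logical package map
 * @pkg:	The physical package id as retrieved via CPUID
 * @cpu:	The CPU for which the mapping is done
 */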
int topology_update_package_map(unsigned int pkg, unsigned int cpu)
{
	int new;

	/* Already available somewhere? */
	new = topology_phys_to_logical_pkg(pkg);
	if (new >= 0)
		goto found;

	new = logical_packages++;
	if (new != pkg) {
		pr_info("CPU %u Converting physical %u to logical package %u\n",
			cpu, pkg, new);
	}
found:
	cpu_data(cpu).logical_proc_id = new;
	return 0;
}

/**
 * topology_update_die_map - Update the physical to logical die map
 * @die:	The die id as retrieved via CPUID
 * @cpu:	The CPU for which the mapping is done
 */
int topology_update_die_map(unsigned int die, unsigned int cpu)
{
	int new;

	/* Already available somewhere? */
	new = topology_phys_to_logical_die(die, cpu);
	if (new >= 0)
		goto found;

	new = logical_die++;
	if (new != die) {
		pr_info("CPU %u Converting physical %u to logical die %u\n",
			cpu, die, new);
	}
found:
	cpu_data(cpu).logical_die_id = new;
	return 0;
}

void __init smp_store_boot_cpu_info(void)
{
	int id = 0;
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	topology_update_package_map(c->phys_proc_id, id);
	topology_update_die_map(c->cpu_die_id, id);
	c->initialized = true;
}

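/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */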
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	/* Copy boot_cpu_data only on the first bringup */
	if (!c->initialized)
		*c = boot_cpu_data;
	c->cpu_index = id;

	identify_secondary_cpu(c);
	c->initialized = true;
}

static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
}

static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return !WARN_ONCE(!topology_same_node(c, o),
		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
		"[node: %d != %d]. Ignoring dependency.\n",
		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

#define link_mask(mfunc, c1, c2)					\
do {									\
	cpumask_set_cpu((c1), mfunc(c2));				\
	cpumask_set_cpu((c2), mfunc(c1));				\
} while (0)

static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

		if (c->phys_proc_id == o->phys_proc_id &&
		    c->cpu_die_id == o->cpu_die_id &&
		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
			if (c->cpu_core_id == o->cpu_core_id)
				return topology_sane(c, o, "smt");

			if ((c->cu_id != 0xff) &&
			    (o->cu_id != 0xff) &&
			    (c->cu_id == o->cu_id))
				return topology_sane(c, o, "smt");
		}

	} else if (c->phys_proc_id == o->phys_proc_id &&
		   c->cpu_die_id == o->cpu_die_id &&
		   c->cpu_core_id == o->cpu_core_id) {
		return topology_sane(c, o, "smt");
	}

	return false;
}

static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (c->phys_proc_id == o->phys_proc_id &&
	    c->cpu_die_id == o->cpu_die_id)
		return true;
	return false;
}

static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (c->phys_proc_id == o->phys_proc_id)
		return true;
	return false;
}

/*
 * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
 *
 * Any Intel CPU that has multiple nodes per package and does not
 * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
 *
 * When in SNC mode, these CPUs enumerate an LLC that is shared
 * by multiple LLC domains, which CPUID cannot express; the table
 * below lets match_llc() tell the two cases apart.
 */
static const struct x86_cpu_id intel_cod_cpu[] = {
	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0),	/* COD */
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0),	/* COD */
	X86_MATCH_INTEL_FAM6_MODEL(ANY, 1),		/* SNC */
	{}
};

static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
	bool intel_snc = id && id->driver_data;

	/* Do not match if we do not have a valid APICID for cpu: */
	if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
		return false;

	/* Do not match if LLC id does not match: */
	if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2))
		return false;

	/*
	 * Allow the SNC topology without warning: in that mode an LLC
	 * legitimately spans NUMA nodes inside a package.
	 */
	if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
		return false;

	return topology_sane(c, o, "llc");
}

#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
static inline int x86_sched_itmt_flags(void)
{
	return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
}

#ifdef CONFIG_SCHED_MC
static int x86_core_flags(void)
{
	return cpu_core_flags() | x86_sched_itmt_flags();
}
#endif
#ifdef CONFIG_SCHED_SMT
static int x86_smt_flags(void)
{
	return cpu_smt_flags() | x86_sched_itmt_flags();
}
#endif
#endif

static struct sched_domain_topology_level x86_numa_in_package_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
	{ NULL, },
};

static struct sched_domain_topology_level x86_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/* Set if a package/die has multiple NUMA nodes inside (COD/SNC style). */
static bool x86_has_numa_in_package;

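/*
 * Build the sibling (SMT), LLC, core and die masks for @cpu by comparing
 * its topology ids against every CPU that has already been set up.
 */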
void set_cpu_sibling_map(int cpu)
{
	bool has_smt = smp_num_siblings > 1;
	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i, threads;

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_mp) {
		cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
		cpumask_set_cpu(cpu, topology_die_cpumask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if (match_pkg(c, o) && !topology_same_node(c, o))
			x86_has_numa_in_package = true;

		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(topology_sibling_cpumask, cpu, i);

		if ((i == cpu) || (has_mp && match_llc(c, o)))
			link_mask(cpu_llc_shared_mask, cpu, i);

		if ((i == cpu) || (has_mp && match_die(c, o)))
			link_mask(topology_die_cpumask, cpu, i);
	}

	threads = cpumask_weight(topology_sibling_cpumask(cpu));
	if (threads > __max_smt_threads)
		__max_smt_threads = threads;

	/*
	 * This needs a separate iteration over the cpus because we rely on
	 * all topology_sibling_cpumask links to have been set up.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mp && match_pkg(c, o))) {
			link_mask(topology_core_cpumask, cpu, i);

			/*
			 *  Does this new cpu bringup a new core?
			 */
			if (threads == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(
				    topology_sibling_cpumask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;

	pr_debug("Before bogomips\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1\n");
}

void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	const char * const names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	pr_info("Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		pr_info("... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			pr_cont("a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			pr_cont("%08x\n", status);
			break;
		default:
			pr_cont("failed\n");
		}
	}
}

/*
 * The Multiprocessor Specification 1.4 (1997) example code suggests
 * that there should be a 10ms delay between the BSP asserting INIT
 * and de-asserting INIT, when starting a remote processor.
 * But that slows boot and resume on modern processors, which include
 * many cores and don't require that delay, so the delay is kept as a
 * quirk and applied only where needed.
 */
#define UDELAY_10MS_DEFAULT 10000

static unsigned int init_udelay = UINT_MAX;

static int __init cpu_init_udelay(char *str)
{
	get_option(&str, &init_udelay);

	return 0;
}
early_param("cpu_init_udelay", cpu_init_udelay);

static void __init smp_quirk_init_udelay(void)
{
	/* if cmdline changed it from default, leave it alone */
	if (init_udelay != UINT_MAX)
		return;

	/* if modern processor, use no delay */
	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
		init_udelay = 0;
		return;
	}
	/* else, use legacy delay */
	init_udelay = UDELAY_10MS_DEFAULT;
}

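/*
 * Wake up a CPU with an NMI instead of INIT/SIPI. This is used for CPU0,
 * which would otherwise re-execute the BIOS boot-strap code when kicked
 * with the regular INIT sequence.
 */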
int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
	u32 dm = apic->dest_mode_logical ? APIC_DEST_LOGICAL : APIC_DEST_PHYSICAL;
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Send the NMI to the target chip. */
	apic_icr_write(APIC_DM_NMI | dm, apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}

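/*
 * Kick a secondary CPU into life with the classic INIT, INIT, STARTUP
 * sequence: assert and deassert INIT, then (on integrated APICs) send up
 * to two STARTUP IPIs pointing at the real-mode trampoline.
 */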
static int
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, num_starts, j;

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version)) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	udelay(init_udelay);

	pr_debug("Deasserting INIT\n");

	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();

	/*
	 * Should we send STARTUP IPIs?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(boot_cpu_apic_version))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write\n");

		/*
		 * STARTUP IPI: boot on the trampoline page.
		 */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(300);

		pr_debug("Startup point 1\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		if (init_udelay == 0)
			udelay(10);
		else
			udelay(200);

		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}

static void announce_cpu(int cpu, int apicid)
{
	static int current_node = NUMA_NO_NODE;
	int node = early_cpu_to_node(cpu);
	static int width, node_width;

	if (!width)
		width = num_digits(num_possible_cpus()) + 1;

	if (!node_width)
		node_width = num_digits(num_possible_nodes()) + 1;

	if (cpu == 1)
		printk(KERN_INFO "x86: Booting SMP configuration:\n");

	if (system_state < SYSTEM_RUNNING) {
		if (node != current_node) {
			if (current_node > (-1))
				pr_cont("\n");
			current_node = node;

			printk(KERN_INFO ".... node %*s#%d, CPUs:  ",
			       node_width - num_digits(node), " ", node);
		}

		if (cpu == 1)
			pr_cont("%*s", width + 1, " ");

		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);

	} else
		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
			node, cpu, apicid);
}

static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();
	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
		return NMI_HANDLED;

	return NMI_DONE;
}

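/*
 * Wake up a secondary CPU: APs get the INIT/STARTUP sequence, while CPU0
 * (after it was offlined) is woken by NMI so that it does not run the
 * BIOS boot-strap code again; a local NMI handler is registered for that.
 */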
static int
wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
			int *cpu0_nmi_registered)
{
	int id;
	int boot_error;

	preempt_disable();

	/*
	 * Wake up AP by INIT, INIT, STARTUP sequence.
	 */
	if (cpu) {
		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
		goto out;
	}

	/*
	 * Wake up BSP by nmi.
	 *
	 * Register a NMI handler to help wake up CPU0.
	 */
	boot_error = register_nmi_handler(NMI_LOCAL,
					  wakeup_cpu0_nmi, 0, "wake_cpu0");

	if (!boot_error) {
		enable_start_cpu0 = 1;
		*cpu0_nmi_registered = 1;
		id = apic->dest_mode_logical ? cpu0_logical_apicid : apicid;
		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
	}

out:
	preempt_enable();

	return boot_error;
}

int common_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int ret;

	/* Just in case we booted with a single CPU. */
	alternatives_enable_smp();

	per_cpu(current_task, cpu) = idle;
	cpu_init_stack_canary(cpu, idle);

	/* Initialize the interrupt stack(s) */
	ret = irq_init_percpu_irqstack(cpu);
	if (ret)
		return ret;

#ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
	per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle);
#else
	initial_gs = per_cpu_offset(cpu);
#endif
	return 0;
}

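/*
 * Boot one AP: set up its initial stack/GDT/entry point, kick it via the
 * APIC driver's wakeup method or the INIT/STARTUP (or NMI) path, and wait
 * for it to show up in cpu_initialized_mask and cpu_callin_mask.
 *
 * Returns zero if the CPU booted OK, otherwise an error code.
 */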
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle,
		       int *cpu0_nmi_registered)
{
	/* start_ip had better be page-aligned! */
	unsigned long start_ip = real_mode_header->trampoline_start;

	unsigned long boot_error = 0;
	unsigned long timeout;

	idle->thread.sp = (unsigned long)task_pt_regs(idle);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu);
	initial_code = (unsigned long)start_secondary;
	initial_stack  = idle->thread.sp;

	/* Enable the espfix hack for this CPU */
	init_espfix_ap(cpu);

	/* So we see what's up */
	announce_cpu(cpu, apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */
	if (x86_platform.legacy.warm_reset) {

		pr_debug("Setting warm reset code and vector.\n");

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(boot_cpu_apic_version)) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * AP might wait on cpu_callout_mask in cpu_init() with
	 * cpu_initialized_mask set if a previous attempt to online
	 * it timed-out. Clear cpu_initialized_mask so that after
	 * INIT/SIPI it could start with a clean state.
	 */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	smp_mb();

	/*
	 * Wake up a CPU in difference cases:
	 * - Use the method in the APIC driver if it's defined.
	 * Otherwise,
	 * - Use an INIT boot APIC message for APs or NMI for BSP.
	 */
	if (apic->wakeup_secondary_cpu)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
						     cpu0_nmi_registered);

	if (!boot_error) {
		/*
		 * Wait 10s total for first sign of life from AP
		 */
		boot_error = -1;
		timeout = jiffies + 10*HZ;
		while (time_before(jiffies, timeout)) {
			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
				/*
				 * Tell AP to proceed with initialization
				 */
				cpumask_set_cpu(cpu, cpu_callout_mask);
				boot_error = 0;
				break;
			}
			schedule();
		}
	}

	if (!boot_error) {
		/*
		 * Wait till AP completes initial initialization
		 */
		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
			/*
			 * Allow other tasks to run while we wait for the
			 * AP to come online.
			 */
			schedule();
		}
	}

	if (x86_platform.legacy.warm_reset) {
		/*
		 * Cleanup possible dangling ends...
		 */
		smpboot_restore_warm_reset_vector();
	}

	return boot_error;
}

int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	int cpu0_nmi_registered = 0;
	unsigned long flags;
	int err, ret = 0;

	lockdep_assert_irqs_enabled();

	pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID ||
	    !physid_isset(apicid, phys_cpu_present_map) ||
	    !apic->apic_id_valid(apicid)) {
		pr_err("%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (in head_32.S) by the bootloader or BIOS.
	 */
	mtrr_save_state();

	/* x86 CPUs take themselves offline, so delayed offline is OK. */
	err = cpu_check_up_prepare(cpu);
	if (err && err != -EBUSY)
		return err;

	/* the FPU context is blank, nobody can modify it */
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;

	err = common_cpu_up(cpu, tidle);
	if (err)
		return err;

	err = do_boot_cpu(apicid, cpu, tidle, &cpu0_nmi_registered);
	if (err) {
		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
		ret = -EIO;
		goto unreg_nmi;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

unreg_nmi:
	/*
	 * Clean up the nmi handler. Do this after the callin and callout sync
	 * to avoid impact of possible long unregister time.
	 */
	if (cpu0_nmi_registered)
		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");

	return ret;
}

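/**
 * arch_disable_smp_support() - Disables SMP support for x86 at runtime
 */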
void arch_disable_smp_support(void)
{
	disable_ioapic_support();
}

/*
 * Fall back to non SMP mode after errors.
 */
static __init void disable_smp(void)
{
	pr_info("SMP disabled\n");

	disable_ioapic_support();

	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	cpumask_set_cpu(0, topology_sibling_cpumask(0));
	cpumask_set_cpu(0, topology_core_cpumask(0));
	cpumask_set_cpu(0, topology_die_cpumask(0));
}

/*
 * Various sanity checks.
 */
static void __init smp_sanity_check(void)
{
	preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		pr_warn("More than 8 CPUs detected - skipping them\n"
			"Use CONFIG_X86_BIGSMP\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
			  boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}

static void __init smp_get_logical_apicid(void)
{
	if (x2apic_mode)
		cpu0_logical_apicid = apic_read(APIC_LDR);
	else
		cpu0_logical_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
}

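/*
 * Prepare for SMP bootup.
 * @max_cpus: configured maximum number of CPUs.
 */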
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	smp_cpu_index_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_boot_cpu_info();	/* Final full version of the data */
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}

	/*
	 * Set 'default' x86 topology; it may be replaced with
	 * x86_numa_in_package_topology later if the platform turns out
	 * to have NUMA nodes inside a package.
	 */
	set_sched_topology(x86_topology);

	set_cpu_sibling_map(0);
	init_freq_invariance(false, false);
	smp_sanity_check();

	switch (apic_intr_mode) {
	case APIC_PIC:
	case APIC_VIRTUAL_WIRE_NO_CONFIG:
		disable_smp();
		return;
	case APIC_SYMMETRIC_IO_NO_ROUTING:
		disable_smp();
		/* Setup local timer */
		x86_init.timers.setup_percpu_clockev();
		return;
	case APIC_VIRTUAL_WIRE:
	case APIC_SYMMETRIC_IO:
		break;
	}

	/* Setup local timer */
	x86_init.timers.setup_percpu_clockev();

	smp_get_logical_apicid();

	pr_info("CPU0: ");
	print_cpu_info(&cpu_data(0));

	uv_system_init();

	set_mtrr_aps_delayed_init();

	smp_quirk_init_udelay();

	speculative_store_bypass_ht_init();
}

void arch_thaw_secondary_cpus_begin(void)
{
	set_mtrr_aps_delayed_init();
}

void arch_thaw_secondary_cpus_end(void)
{
	mtrr_aps_init();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	cpu_set_state_online(me);
	native_pv_lock_init();
}

void __init calculate_max_logical_packages(void)
{
	int ncpus;

	/*
	 * Today neither Intel nor AMD support heterogeneous systems so
	 * extrapolate the boot cpu's data to all packages.
	 */
	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
	__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
	pr_info("Max logical packages: %u\n", __max_logical_packages);
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done\n");

	calculate_max_logical_packages();

	if (x86_has_numa_in_package)
		set_sched_topology(x86_numa_in_package_topology);

	nmi_selftest();
	impress_friends();
	mtrr_aps_init();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);

/*
 * cpu_possible_mask should be static: it represents CPUs which are either
 * present or can be hotplug-added later, and it is fixed once at boot.
 * Sizing it too high wastes memory for per-cpu allocations, so it is
 * derived from the enumerated processors, the number of disabled
 * (hotpluggable) CPUs and the 'possible_cpus'/'maxcpus' command line
 * options.
 */
__init void prefill_possible_map(void)
{
	int i, possible;

	/* No processor was found in mptable or ACPI MADT */
	if (!num_processors) {
		if (boot_cpu_has(X86_FEATURE_APIC)) {
			int apicid = boot_cpu_physical_apicid;
			int cpu = hard_smp_processor_id();

			pr_warn("Boot CPU (id %d) not listed by BIOS\n", cpu);

			/* Make sure boot cpu is enumerated */
			if (apic->cpu_present_to_apicid(0) == BAD_APICID &&
			    apic->apic_id_valid(apicid))
				generic_processor_info(apicid, boot_cpu_apic_version);
		}

		if (!num_processors)
			num_processors = 1;
	}

	i = setup_max_cpus ?: 1;
	if (setup_possible_cpus == -1) {
		possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
		if (setup_max_cpus)
			possible += disabled_cpus;
#else
		if (possible > i)
			possible = i;
#endif
	} else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	/* nr_cpu_ids could be reduced via nr_cpus= */
	if (possible > nr_cpu_ids) {
		pr_warn("%d Processors exceeds NR_CPUS limit of %u\n",
			possible, nr_cpu_ids);
		possible = nr_cpu_ids;
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!setup_max_cpus)
#endif
	if (possible > i) {
		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
			possible, setup_max_cpus);
		possible = i;
	}

	nr_cpu_ids = possible;

	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	reset_cpu_possible_mask();

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
}

#ifdef CONFIG_HOTPLUG_CPU

/* Recompute SMT state for all CPUs on offline */
static void recompute_smt_state(void)
{
	int max_threads, cpu;

	max_threads = 0;
	for_each_online_cpu (cpu) {
		int threads = cpumask_weight(topology_sibling_cpumask(cpu));

		if (threads > max_threads)
			max_threads = threads;
	}
	__max_smt_threads = max_threads;
}

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, topology_core_cpumask(cpu)) {
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, topology_die_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_die_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
	cpumask_clear(cpu_llc_shared_mask(cpu));
	cpumask_clear(topology_sibling_cpumask(cpu));
	cpumask_clear(topology_core_cpumask(cpu));
	cpumask_clear(topology_die_cpumask(cpu));
	c->cpu_core_id = 0;
	c->booted_cores = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
	recompute_smt_state();
}

static void remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
	int cpu = smp_processor_id();

	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
	lapic_offline();
}

int native_cpu_disable(void)
{
	int ret;

	ret = lapic_can_unplug_cpu();
	if (ret)
		return ret;

	cpu_disable_common();

	/*
	 * Disable the local APIC. Otherwise IPI broadcasts will reach
	 * it. It still responds normally to INIT, NMI, SMI, and SIPI
	 * messages.
	 *
	 * Disabling the APIC must happen after cpu_disable_common()
	 * which invokes fixup_irqs(); interrupts arriving after the
	 * APIC is disabled will be lost.
	 */
	apic_soft_disable();

	return 0;
}

int common_cpu_die(unsigned int cpu)
{
	int ret = 0;

	/* We don't do anything here: idle task is faking death itself. */

	/* They ack this in play_dead() by setting CPU_DEAD */
	if (cpu_wait_death(cpu, 5)) {
		if (system_state == SYSTEM_RUNNING)
			pr_info("CPU %u is now offline\n", cpu);
	} else {
		pr_err("CPU %u didn't die...\n", cpu);
		ret = -1;
	}

	return ret;
}

void native_cpu_die(unsigned int cpu)
{
	common_cpu_die(cpu);
}

void play_dead_common(void)
{
	idle_task_exit();

	/* Ack it */
	(void)cpu_report_death();

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}

/**
 * cond_wakeup_cpu0 - Wake up CPU0 if needed.
 *
 * If NMI wants to wake up CPU0, start CPU0.
 */
void cond_wakeup_cpu0(void)
{
	if (smp_processor_id() == 0 && enable_start_cpu0)
		start_cpu0();
}
EXPORT_SYMBOL_GPL(cond_wakeup_cpu0);

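/*
 * We need to flush the caches before going to sleep, lest we have
 * dirty data in our caches when we come back up.
 */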
static inline void mwait_play_dead(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	void *mwait_ptr;
	int i;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		return;
	if (!this_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
		return;
	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
		return;

	eax = CPUID_MWAIT_LEAF;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/*
	 * eax will be 0 if EDX enumeration is not valid.
	 * Initialized below to cstate, sub_cstate value when EDX is valid.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
		eax = 0;
	} else {
		edx >>= MWAIT_SUBSTATE_SIZE;
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}
		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			(highest_subcstate - 1);
	}

	/*
	 * This should be a memory location in a cache line which is
	 * unlikely to be touched by other processors. The actual
	 * content is immaterial as it is not actually modified in any way.
	 */
	mwait_ptr = &current_thread_info()->flags;

	wbinvd();

	while (1) {
		/*
		 * The CLFLUSH is a workaround for erratum AAI65 for
		 * the Xeon 7400 series. It's not clear it is actually
		 * needed, but it should be harmless in either case.
		 * The WBINVD is insufficient due to the spurious-wakeup
		 * case where we return around the loop.
		 */
		mb();
		clflush(mwait_ptr);
		mb();
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);

		cond_wakeup_cpu0();
	}
}

void hlt_play_dead(void)
{
	if (__this_cpu_read(cpu_info.x86) >= 4)
		wbinvd();

	while (1) {
		native_halt();

		cond_wakeup_cpu0();
	}
}

void native_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);

	mwait_play_dead();
	if (cpuidle_play_dead())
		hlt_play_dead();
}

#else
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}

#endif

#ifdef CONFIG_X86_64
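/*
 * APERF/MPERF frequency-invariance support.
 *
 * The scheduler wants task utilization to be independent of the frequency
 * the CPU happens to run at, so on every tick arch_scale_freq_tick() samples
 * the APERF (actual cycles) and MPERF (cycles at the guaranteed base
 * frequency) MSRs and computes, roughly:
 *
 *	freq_scale = (delta_APERF / delta_MPERF) *
 *		     SCHED_CAPACITY_SCALE / arch_max_freq_ratio
 *
 * where arch_max_freq_ratio is (max turbo frequency / base frequency),
 * also expressed relative to SCHED_CAPACITY_SCALE. The result is clamped
 * to SCHED_CAPACITY_SCALE. The per-vendor helpers below only exist to
 * derive that max/base ratio, from model specific MSRs on Intel or from
 * ACPI CPPC data on AMD.
 */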
DEFINE_STATIC_KEY_FALSE(arch_scale_freq_key);

static DEFINE_PER_CPU(u64, arch_prev_aperf);
static DEFINE_PER_CPU(u64, arch_prev_mperf);
static u64 arch_turbo_freq_ratio = SCHED_CAPACITY_SCALE;
static u64 arch_max_freq_ratio = SCHED_CAPACITY_SCALE;

void arch_set_max_freq_ratio(bool turbo_disabled)
{
	arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
					arch_turbo_freq_ratio;
}
EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);

static bool turbo_disabled(void)
{
	u64 misc_en;
	int err;

	err = rdmsrl_safe(MSR_IA32_MISC_ENABLE, &misc_en);
	if (err)
		return false;

	return (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
}

static bool slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
{
	int err;

	err = rdmsrl_safe(MSR_ATOM_CORE_RATIOS, base_freq);
	if (err)
		return false;

	err = rdmsrl_safe(MSR_ATOM_CORE_TURBO_RATIOS, turbo_freq);
	if (err)
		return false;

	*base_freq = (*base_freq >> 16) & 0x3F;     /* max P state */
	*turbo_freq = *turbo_freq & 0x3F;           /* 1C turbo    */

	return true;
}

#define X86_MATCH(model)					\
	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6,		\
		INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL)

static const struct x86_cpu_id has_knl_turbo_ratio_limits[] = {
	X86_MATCH(XEON_PHI_KNL),
	X86_MATCH(XEON_PHI_KNM),
	{}
};

static const struct x86_cpu_id has_skx_turbo_ratio_limits[] = {
	X86_MATCH(SKYLAKE_X),
	{}
};

static const struct x86_cpu_id has_glm_turbo_ratio_limits[] = {
	X86_MATCH(ATOM_GOLDMONT),
	X86_MATCH(ATOM_GOLDMONT_D),
	X86_MATCH(ATOM_GOLDMONT_PLUS),
	{}
};

static bool knl_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq,
				   int num_delta_fratio)
{
	int fratio, delta_fratio, found;
	int err, i;
	u64 msr;

	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
	if (err)
		return false;

	*base_freq = (*base_freq >> 8) & 0xFF;	    /* max P state */

	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
	if (err)
		return false;

	fratio = (msr >> 8) & 0xFF;
	i = 16;
	found = 0;
	do {
		if (found >= num_delta_fratio) {
			*turbo_freq = fratio;
			return true;
		}

		delta_fratio = (msr >> (i + 5)) & 0x7;

		if (delta_fratio) {
			found += 1;
			fratio -= delta_fratio;
		}

		i += 8;
	} while (i < 64);

	return true;
}

static bool skx_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq, int size)
{
	u64 ratios, counts;
	u32 group_size;
	int err, i;

	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
	if (err)
		return false;

	*base_freq = (*base_freq >> 8) & 0xFF;      /* max P state */

	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &ratios);
	if (err)
		return false;

	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT1, &counts);
	if (err)
		return false;

	for (i = 0; i < 64; i += 8) {
		group_size = (counts >> i) & 0xFF;
		if (group_size >= size) {
			*turbo_freq = (ratios >> i) & 0xFF;
			return true;
		}
	}

	return false;
}

static bool core_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
{
	u64 msr;
	int err;

	err = rdmsrl_safe(MSR_PLATFORM_INFO, base_freq);
	if (err)
		return false;

	err = rdmsrl_safe(MSR_TURBO_RATIO_LIMIT, &msr);
	if (err)
		return false;

	*base_freq = (*base_freq >> 8) & 0xFF;      /* max P state */
	*turbo_freq = (msr >> 24) & 0xFF;           /* 4C turbo    */

	/* The CPU may have less than 4 cores */
	if (!*turbo_freq)
		*turbo_freq = msr & 0xFF;           /* 1C turbo    */

	return true;
}

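/*
 * Derive the base and turbo frequency on Intel CPUs, trying the model
 * specific methods first (Silvermont/Airmont, Goldmont, Knights
 * Landing/Mill, Skylake-X) and falling back to the common
 * MSR_PLATFORM_INFO / MSR_TURBO_RATIO_LIMIT layout.
 */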
static bool intel_set_max_freq_ratio(void)
{
	u64 base_freq, turbo_freq;
	u64 turbo_ratio;

	if (slv_set_max_freq_ratio(&base_freq, &turbo_freq))
		goto out;

	if (x86_match_cpu(has_glm_turbo_ratio_limits) &&
	    skx_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
		goto out;

	if (x86_match_cpu(has_knl_turbo_ratio_limits) &&
	    knl_set_max_freq_ratio(&base_freq, &turbo_freq, 1))
		goto out;

	if (x86_match_cpu(has_skx_turbo_ratio_limits) &&
	    skx_set_max_freq_ratio(&base_freq, &turbo_freq, 4))
		goto out;

	if (core_set_max_freq_ratio(&base_freq, &turbo_freq))
		goto out;

	return false;

out:
	/*
	 * Some hypervisors advertise X86_FEATURE_APERFMPERF
	 * but then fill all MSRs with zeroes.
	 * Some CPUs have turbo boost but don't declare any turbo ratio
	 * in MSR_TURBO_RATIO_LIMIT.
	 */
	if (!base_freq || !turbo_freq) {
		pr_debug("Couldn't determine cpu base or turbo frequency, necessary for scale-invariant accounting.\n");
		return false;
	}

	turbo_ratio = div_u64(turbo_freq * SCHED_CAPACITY_SCALE, base_freq);
	if (!turbo_ratio) {
		pr_debug("Non-zero turbo and base frequencies led to a 0 ratio.\n");
		return false;
	}

	arch_turbo_freq_ratio = turbo_ratio;
	arch_set_max_freq_ratio(turbo_disabled());

	return true;
}

#ifdef CONFIG_ACPI_CPPC_LIB
static bool amd_set_max_freq_ratio(void)
{
	struct cppc_perf_caps perf_caps;
	u64 highest_perf, nominal_perf;
	u64 perf_ratio;
	int rc;

	rc = cppc_get_perf_caps(0, &perf_caps);
	if (rc) {
		pr_debug("Could not retrieve perf counters (%d)\n", rc);
		return false;
	}

	highest_perf = amd_get_highest_perf();
	nominal_perf = perf_caps.nominal_perf;

	if (!highest_perf || !nominal_perf) {
		pr_debug("Could not retrieve highest or nominal performance\n");
		return false;
	}

	perf_ratio = div_u64(highest_perf * SCHED_CAPACITY_SCALE, nominal_perf);
	/* midpoint between max_boost and max_P */
	perf_ratio = (perf_ratio + SCHED_CAPACITY_SCALE) >> 1;
	if (!perf_ratio) {
		pr_debug("Non-zero highest/nominal perf values led to a 0 ratio\n");
		return false;
	}

	arch_turbo_freq_ratio = perf_ratio;
	arch_set_max_freq_ratio(false);

	return true;
}
#else
static bool amd_set_max_freq_ratio(void)
{
	return false;
}
#endif

static void init_counter_refs(void)
{
	u64 aperf, mperf;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	this_cpu_write(arch_prev_aperf, aperf);
	this_cpu_write(arch_prev_mperf, mperf);
}

#ifdef CONFIG_PM_SLEEP
static struct syscore_ops freq_invariance_syscore_ops = {
	.resume = init_counter_refs,
};

static void register_freq_invariance_syscore_ops(void)
{
	/* Bail out if already registered. */
	if (freq_invariance_syscore_ops.node.prev)
		return;

	register_syscore_ops(&freq_invariance_syscore_ops);
}
#else
static inline void register_freq_invariance_syscore_ops(void) {}
#endif

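/*
 * Enable frequency-invariant accounting if the max/base frequency ratio
 * could be determined: snapshot the APERF/MPERF counters, flip the static
 * key and register the resume hook. Secondary CPUs only need the counter
 * snapshot. For AMD this can only complete once CPPC data is available.
 */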
static void init_freq_invariance(bool secondary, bool cppc_ready)
{
	bool ret = false;

	if (!boot_cpu_has(X86_FEATURE_APERFMPERF))
		return;

	if (secondary) {
		if (static_branch_likely(&arch_scale_freq_key)) {
			init_counter_refs();
		}
		return;
	}

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		ret = intel_set_max_freq_ratio();
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (!cppc_ready) {
			return;
		}
		ret = amd_set_max_freq_ratio();
	}

	if (ret) {
		init_counter_refs();
		static_branch_enable(&arch_scale_freq_key);
		register_freq_invariance_syscore_ops();
		pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
	} else {
		pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n");
	}
}

#ifdef CONFIG_ACPI_CPPC_LIB
static DEFINE_MUTEX(freq_invariance_lock);

void init_freq_invariance_cppc(void)
{
	static bool secondary;

	mutex_lock(&freq_invariance_lock);

	init_freq_invariance(secondary, true);
	secondary = true;

	mutex_unlock(&freq_invariance_lock);
}
#endif

static void disable_freq_invariance_workfn(struct work_struct *work)
{
	static_branch_disable(&arch_scale_freq_key);
}

static DECLARE_WORK(disable_freq_invariance_work,
		    disable_freq_invariance_workfn);

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;

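/*
 * Called on every scheduler tick: sample APERF/MPERF and update this CPU's
 * arch_freq_scale as described above. If the computation overflows or the
 * counters misbehave, the feature disables itself via
 * disable_freq_invariance_work.
 */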
void arch_scale_freq_tick(void)
{
	u64 freq_scale = SCHED_CAPACITY_SCALE;
	u64 aperf, mperf;
	u64 acnt, mcnt;

	if (!arch_scale_freq_invariant())
		return;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	acnt = aperf - this_cpu_read(arch_prev_aperf);
	mcnt = mperf - this_cpu_read(arch_prev_mperf);

	this_cpu_write(arch_prev_aperf, aperf);
	this_cpu_write(arch_prev_mperf, mperf);

	if (check_shl_overflow(acnt, 2*SCHED_CAPACITY_SHIFT, &acnt))
		goto error;

	if (check_mul_overflow(mcnt, arch_max_freq_ratio, &mcnt) || !mcnt)
		goto error;

	freq_scale = div64_u64(acnt, mcnt);
	if (!freq_scale)
		goto error;

	if (freq_scale > SCHED_CAPACITY_SCALE)
		freq_scale = SCHED_CAPACITY_SCALE;

	this_cpu_write(arch_freq_scale, freq_scale);
	return;

error:
	pr_warn("Scheduler frequency invariance went wobbly, disabling!\n");
	schedule_work(&disable_freq_invariance_work);
}
#else
static inline void init_freq_invariance(bool secondary, bool cppc_ready)
{
}
#endif /* CONFIG_X86_64 */