diff --git a/sys/arch/aarch64/aarch64/cpu.c b/sys/arch/aarch64/aarch64/cpu.c index b14649b452c5..2fa72ad21615 100644 --- a/sys/arch/aarch64/aarch64/cpu.c +++ b/sys/arch/aarch64/aarch64/cpu.c @@ -67,16 +67,6 @@ static void cpu_identify2(device_t self, struct cpu_info *); static void cpu_setup_id(struct cpu_info *); static void cpu_setup_sysctl(device_t, struct cpu_info *); -#ifdef MULTIPROCESSOR -uint64_t cpu_mpidr[MAXCPUS]; - -volatile u_int aarch64_cpu_mbox[howmany(MAXCPUS, sizeof(u_int))] __cacheline_aligned = { 0 }; -volatile u_int aarch64_cpu_hatched[howmany(MAXCPUS, sizeof(u_int))] __cacheline_aligned = { 0 }; -u_int arm_cpu_max = 1; - -static kmutex_t cpu_hatch_lock; -#endif /* MULTIPROCESSOR */ - #ifdef MULTIPROCESSOR #define NCPUINFO MAXCPUS #else @@ -94,10 +84,6 @@ struct cpu_info cpu_info_store[NCPUINFO] = { } }; -struct cpu_info *cpu_info[NCPUINFO] __read_mostly = { - [0] = &cpu_info_store[0] -}; - void cpu_attach(device_t dv, cpuid_t id) { @@ -497,38 +483,6 @@ cpu_setup_sysctl(device_t dv, struct cpu_info *ci) } #ifdef MULTIPROCESSOR -void -cpu_boot_secondary_processors(void) -{ - u_int n, bit; - - if ((boothowto & RB_MD1) != 0) - return; - - mutex_init(&cpu_hatch_lock, MUTEX_DEFAULT, IPL_NONE); - - VPRINTF("%s: starting secondary processors\n", __func__); - - /* send mbox to have secondary processors do cpu_hatch() */ - for (n = 0; n < __arraycount(aarch64_cpu_mbox); n++) - atomic_or_uint(&aarch64_cpu_mbox[n], aarch64_cpu_hatched[n]); - __asm __volatile ("sev; sev; sev"); - - /* wait all cpus have done cpu_hatch() */ - for (n = 0; n < __arraycount(aarch64_cpu_mbox); n++) { - while (membar_consumer(), aarch64_cpu_mbox[n] & aarch64_cpu_hatched[n]) { - __asm __volatile ("wfe"); - } - /* Add processors to kcpuset */ - for (bit = 0; bit < 32; bit++) { - if (aarch64_cpu_hatched[n] & __BIT(bit)) - kcpuset_set(kcpuset_attached, n * 32 + bit); - } - } - - VPRINTF("%s: secondary processors hatched\n", __func__); -} - void cpu_hatch(struct cpu_info *ci) { @@ -556,23 +510,12 @@ cpu_hatch(struct cpu_info *ci) #endif /* - * clear my bit of aarch64_cpu_mbox to tell cpu_boot_secondary_processors(). + * clear my bit of arm_cpu_mbox to tell cpu_boot_secondary_processors(). * there are cpu0,1,2,3, and if cpu2 is unresponsive, * ci_index are each cpu0=0, cpu1=1, cpu2=undef, cpu3=2. * therefore we have to use device_unit instead of ci_index for mbox. 
*/ - const u_int off = device_unit(ci->ci_dev) / 32; - const u_int bit = device_unit(ci->ci_dev) % 32; - atomic_and_uint(&aarch64_cpu_mbox[off], ~__BIT(bit)); - __asm __volatile ("sev; sev; sev"); -} -bool -cpu_hatched_p(u_int cpuindex) -{ - const u_int off = cpuindex / 32; - const u_int bit = cpuindex % 32; - membar_consumer(); - return (aarch64_cpu_hatched[off] & __BIT(bit)) != 0; + cpu_clr_mbox(device_unit(ci->ci_dev)); } #endif /* MULTIPROCESSOR */ diff --git a/sys/arch/aarch64/aarch64/locore.S b/sys/arch/aarch64/aarch64/locore.S index 6c050b19a394..03e063571ff3 100644 --- a/sys/arch/aarch64/aarch64/locore.S +++ b/sys/arch/aarch64/aarch64/locore.S @@ -449,35 +449,41 @@ mp_vstart: mrs x1, mpidr_el1 str x1, [x0, #CI_MPIDR] /* curcpu()->ci_mpidr = mpidr_el1 */ - mov x0, #32 - udiv x1, x27, x0 - adrl x0, _C_LABEL(aarch64_cpu_hatched) - add x28, x0, x1, lsl #2 /* x28 = &aarch64_cpu_hatched[cpuindex/32] */ + /* set topology information */ + mov x2, #0 + bl arm_cpu_topology_set + + /* x28 = &arm_cpu_hatched[cpuindex / (sizeof(u_long) * NBBY)] */ + adrl x0, _C_LABEL(arm_cpu_hatched) + mov x1, x27, lsr #6 + add x28, x0, x1, lsl #3 + + /* x29 = __BIT(cpuindex % (sizeof(u_long) * NBBY)) */ mov x0, #1 - mov x2, #32 - msub x1, x1, x2, x27 - lsl x29, x0, x1 /* x29 = 1 << (cpuindex % 32) */ + and x2, x27, #63 + lsl x29, x0, x2 /* - * atomic_or_uint(&aarch64_cpu_hatched[cpuindex/32], 1< + #ifdef __aarch64__ #ifdef _KERNEL_OPT @@ -104,7 +106,6 @@ struct cpu_info { uint64_t ci_acpiid; /* ACPI Processor Unique ID */ struct aarch64_sysctl_cpu_id ci_id; -#define arm_cpu_mpidr(ci) ((ci)->ci_id.ac_mpidr) struct aarch64_cache_info *ci_cacheinfo; struct aarch64_cpufuncs ci_cpufuncs; @@ -123,15 +124,12 @@ curcpu(void) #define setsoftast(ci) atomic_or_uint(&(ci)->ci_astpending, __BIT(0)) #define cpu_signotify(l) setsoftast((l)->l_cpu) -void cpu_proc_fork(struct proc *, struct proc *); -void cpu_need_proftick(struct lwp *l); -void cpu_boot_secondary_processors(void); -void cpu_mpstart(void); -void cpu_hatch(struct cpu_info *); +void cpu_need_proftick(struct lwp *l); + +void cpu_hatch(struct cpu_info *); extern struct cpu_info *cpu_info[]; -extern uint64_t cpu_mpidr[]; /* MULTIPROCESSOR */ -bool cpu_hatched_p(u_int); /* MULTIPROCESSOR */ +extern struct cpu_info cpu_info_store[]; #define CPU_INFO_ITERATOR cpuid_t #if defined(MULTIPROCESSOR) || defined(_MODULE) @@ -161,14 +159,8 @@ cpu_dosoftints(void) #endif } -void cpu_attach(device_t, cpuid_t); - #endif /* _KERNEL || _KMEMUSER */ -#elif defined(__arm__) - -#include - #endif #endif /* _AARCH64_CPU_H_ */ diff --git a/sys/arch/aarch64/include/machdep.h b/sys/arch/aarch64/include/machdep.h index d23ec9a3df9f..bb14967f595b 100644 --- a/sys/arch/aarch64/include/machdep.h +++ b/sys/arch/aarch64/include/machdep.h @@ -66,10 +66,6 @@ extern void (*cpu_powerdown_address)(void); extern char *booted_kernel; -#ifdef MULTIPROCESSOR -extern u_int arm_cpu_max; -#endif - /* * note that we use void * as all the platforms have different ideas on what * the structure is diff --git a/sys/arch/arm/acpi/cpu_acpi.c b/sys/arch/arm/acpi/cpu_acpi.c index ad4dfa87e12b..7359d28ea0c5 100644 --- a/sys/arch/arm/acpi/cpu_acpi.c +++ b/sys/arch/arm/acpi/cpu_acpi.c @@ -57,8 +57,6 @@ __KERNEL_RCSID(0, "$NetBSD: cpu_acpi.c,v 1.7 2019/10/19 18:04:26 jmcneill Exp $" #include #endif -extern struct cpu_info cpu_info_store[]; - static int cpu_acpi_match(device_t, cfdata_t, void *); static void cpu_acpi_attach(device_t, device_t, void *); diff --git a/sys/arch/arm/altera/cycv_platform.c 
b/sys/arch/arm/altera/cycv_platform.c index 9d0e183faeea..4444dec6b7cd 100644 --- a/sys/arch/arm/altera/cycv_platform.c +++ b/sys/arch/arm/altera/cycv_platform.c @@ -117,7 +117,7 @@ cycv_mpstart(void) int i; for (i = 0x10000000; i > 0; i--) { membar_consumer(); - if (arm_cpu_hatched == (1 << 1)) + if (cpu_hatched_p(1)) break; } if (i == 0) { diff --git a/sys/arch/arm/arm/arm_cpu_topology.c b/sys/arch/arm/arm/arm_cpu_topology.c index 3de8c88907f9..b8af1a7d7265 100644 --- a/sys/arch/arm/arm/arm_cpu_topology.c +++ b/sys/arch/arm/arm/arm_cpu_topology.c @@ -47,7 +47,7 @@ __KERNEL_RCSID(0, "$NetBSD: arm_cpu_topology.c,v 1.2 2020/01/16 06:34:24 mrg Exp #include void -arm_cpu_topology_set(struct cpu_info * const ci, uint64_t mpidr, bool slow) +arm_cpu_topology_set(struct cpu_info * const ci, mpidr_t mpidr) { #ifdef MULTIPROCESSOR uint pkgid, coreid, smtid, numaid = 0; @@ -61,7 +61,7 @@ arm_cpu_topology_set(struct cpu_info * const ci, uint64_t mpidr, bool slow) coreid = __SHIFTOUT(mpidr, MPIDR_AFF0); smtid = 0; } - cpu_topology_set(ci, pkgid, coreid, smtid, numaid, slow); + cpu_topology_set(ci, pkgid, coreid, smtid, numaid); #endif /* MULTIPROCESSOR */ } @@ -88,8 +88,7 @@ arm_cpu_do_topology(struct cpu_info *const newci) * mi_cpu_attach() is called and ncpu is bumped, so call it * directly here. This also handles the not-MP case. */ - arm_cpu_topology_set(newci, arm_cpu_mpidr(newci), - newci->ci_capacity_dmips_mhz < best_cap); + cpu_topology_setspeed(newci, newci->ci_capacity_dmips_mhz < best_cap); /* * Using saved largest capacity, refresh previous topology info. @@ -98,8 +97,8 @@ arm_cpu_do_topology(struct cpu_info *const newci) for (CPU_INFO_FOREACH(cii, ci)) { if (ci == newci) continue; - arm_cpu_topology_set(ci, arm_cpu_mpidr(ci), - ci->ci_capacity_dmips_mhz < best_cap); + cpu_topology_setspeed(ci, + ci->ci_capacity_dmips_mhz < best_cap); } #endif /* MULTIPROCESSOR */ } diff --git a/sys/arch/arm/arm/arm_machdep.c b/sys/arch/arm/arm/arm_machdep.c index babf163fa9e4..2643a0be7a2a 100644 --- a/sys/arch/arm/arm/arm_machdep.c +++ b/sys/arch/arm/arm/arm_machdep.c @@ -106,24 +106,22 @@ char machine_arch[] = MACHINE_ARCH; /* from */ extern const uint32_t undefinedinstruction_bounce[]; -/* Our exported CPU info; we can have only one. */ -struct cpu_info cpu_info_store = { - .ci_cpl = IPL_HIGH, - .ci_curlwp = &lwp0, - .ci_undefsave[2] = (register_t) undefinedinstruction_bounce, -#if defined(ARM_MMU_EXTENDED) && KERNEL_PID != 0 - .ci_pmap_asid_cur = KERNEL_PID, -#endif -}; - #ifdef MULTIPROCESSOR #define NCPUINFO MAXCPUS #else #define NCPUINFO 1 #endif -struct cpu_info *cpu_info[NCPUINFO] = { - [0] = &cpu_info_store +/* Our exported CPU info; we can have only one. */ +struct cpu_info cpu_info_store[NCPUINFO] = { + [0] = { + .ci_cpl = IPL_HIGH, + .ci_curlwp = &lwp0, + .ci_undefsave[2] = (register_t) undefinedinstruction_bounce, +#if defined(ARM_MMU_EXTENDED) && KERNEL_PID != 0 + .ci_pmap_asid_cur = KERNEL_PID, +#endif + } }; const pcu_ops_t * const pcu_ops_md_defs[PCU_UNIT_COUNT] = { diff --git a/sys/arch/arm/arm/armv6_start.S b/sys/arch/arm/arm/armv6_start.S index f0314fbe60fc..3a472fb52cea 100644 --- a/sys/arch/arm/arm/armv6_start.S +++ b/sys/arch/arm/arm/armv6_start.S @@ -178,14 +178,6 @@ arm_bad_vtopdiff: 1: b 1b ASEND(generic_start) -generic_vstart: - VPRINTF("go\n\r") - - /* - * Jump to start in locore.S, which in turn will call initarm and main. - */ - b start - /* * Save the u-boot arguments (including FDT address) and the virtual to physical * offset.
@@ -464,10 +456,31 @@ generic_startv7: sub r0, R_VTOPDIFF // Return to virtual addess after the call to armv7_mmuinit - movw lr, #:lower16:generic_vstart - movt lr, #:upper16:generic_vstart + movw lr, #:lower16:generic_vstartv7 + movt lr, #:upper16:generic_vstartv7 b armv7_mmuinit +generic_vstartv7: + + /* r0 = &cpu_info_store[0] */ + movw r0, #:lower16:cpu_info_store + movt r0, #:upper16:cpu_info_store + + mrc p15, 0, r1, c0, c0, 0 // MIDR get + str r1, [r0, #CI_MIDR] + mrc p15, 0, r1, c0, c0, 5 // MPIDR get + str r1, [r0, #CI_MPIDR] + + mov r2, #0 + bl arm_cpu_topology_set + + VPRINTF("go\n\r") + + /* + * Jump to start in locore.S, which in turn will call initarm and main. + */ + b start + /* NOTREACHED */ .ltorg #elif defined(_ARM_ARCH_6) @@ -491,9 +504,17 @@ generic_startv6: ldr r0, =TEMP_L1_TABLE sub r0, R_VTOPDIFF - ldr lr, =generic_vstart + ldr lr, =generic_vstartv6 b armv6_mmuinit +generic_vstartv6: + VPRINTF("go\n\r") + + /* + * Jump to start in locore.S, which in turn will call initarm and main. + */ + b start + /* NOTREACHED */ .ltorg @@ -851,20 +872,38 @@ armv7_mpcontinuation: VPRINTF("go\n\r") - mov r0, R_INDEX // index into cpu_mpidr[] or cpu_number if not found + // index into cpu_mpidr[] or cpu_number if not found + mov r0, R_INDEX bl cpu_init_secondary_processor + /* r0 = &cpu_info_store[0] */ + movw r0, #:lower16:cpu_info_store + movt r0, #:upper16:cpu_info_store + + mov r4, #CPU_INFO_SIZE + mul r5, r4, R_INDEX + add r0, r5 + + mrc p15, 0, r1, c0, c0, 0 // MIDR get + str r1, [r0, #CI_MIDR] + mrc p15, 0, r1, c0, c0, 5 // MPIDR get + str r1, [r0, #CI_MPIDR] + + mov r2, #0 + bl arm_cpu_topology_set + /* - * Wait for cpu_boot_secondary_processors when cpu_info has - * been allocated, etc + * Wait for cpu_boot_secondary_processors */ + /* r6 = &arm_cpu_mbox[0] */ movw r6, #:lower16:arm_cpu_mbox movt r6, #:upper16:arm_cpu_mbox mov r5, #1 // bitmask... lsl r5, R_INDEX // ... for our cpu + /* wait for the mailbox start bit to become true */ 1: dmb // data memory barrier ldr r2, [r6] // load mbox tst r2, r5 // is our bit set? @@ -872,7 +911,7 @@ armv7_mpcontinuation: beq 1b // no, and try again movw r0, #:lower16:cpu_info - movt r0, #:upper16:cpu_info // get pointer to cpu_infos + movt r0, #:upper16:cpu_info // get pointer to cpu_info ldr r5, [r0, R_INDEX, lsl #2] // load our cpu_info ldr r6, [r5, #CI_IDLELWP] // get the idlelwp ldr r7, [r6, #L_PCB] // now get its pcb diff --git a/sys/arch/arm/arm/cpu_subr.c b/sys/arch/arm/arm/cpu_subr.c new file mode 100644 index 000000000000..a767888b51c2 --- /dev/null +++ b/sys/arch/arm/arm/cpu_subr.c @@ -0,0 +1,142 @@ +/* $NetBSD$ */ + +/*- + * Copyright (c) 2020 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Nick Hudson + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "opt_cputypes.h" +#include "opt_multiprocessor.h" + +#include +__KERNEL_RCSID(0, "$NetBSD$"); + +#include +#include +#include +#include + +#ifdef VERBOSE_INIT_ARM +#define VPRINTF(...) printf(__VA_ARGS__) +#else +#define VPRINTF(...) __nothing +#endif + +#ifdef MULTIPROCESSOR +#define NCPUINFO MAXCPUS +#else +#define NCPUINFO 1 +#endif /* MULTIPROCESSOR */ + +mpidr_t cpu_mpidr[NCPUINFO] = { + [0 ... NCPUINFO - 1] = ~0, +}; + +struct cpu_info *cpu_info[NCPUINFO] __read_mostly = { + [0] = &cpu_info_store[0] +}; + +#ifdef MULTIPROCESSOR + +#define CPUINDEX_DIVISOR (sizeof(u_long) * NBBY) + +volatile u_long arm_cpu_hatched[howmany(MAXCPUS, CPUINDEX_DIVISOR)] __cacheline_aligned = { 0 }; +volatile u_long arm_cpu_mbox[howmany(MAXCPUS, CPUINDEX_DIVISOR)] __cacheline_aligned = { 0 }; +u_int arm_cpu_max = 1; + +kmutex_t cpu_hatch_lock; + +void +cpu_boot_secondary_processors(void) +{ + u_int cpuno; + + if ((boothowto & RB_MD1) != 0) + return; + + mutex_init(&cpu_hatch_lock, MUTEX_DEFAULT, IPL_NONE); + + VPRINTF("%s: starting secondary processors\n", __func__); + + /* send mbox to have secondary processors do cpu_hatch() */ + for (size_t n = 0; n < __arraycount(arm_cpu_mbox); n++) + atomic_or_ulong(&arm_cpu_mbox[n], arm_cpu_hatched[n]); + + __asm __volatile ("sev; sev; sev"); + + /* wait all cpus have done cpu_hatch() */ + for (cpuno = 1; cpuno < ncpu; cpuno++) { + if (!cpu_hatched_p(cpuno)) + continue; + + const size_t off = cpuno / CPUINDEX_DIVISOR; + const u_long bit = __BIT(cpuno % CPUINDEX_DIVISOR); + + while (membar_consumer(), arm_cpu_mbox[off] & bit) { + __asm __volatile ("wfe"); + } + /* Add processor to kcpuset */ + kcpuset_set(kcpuset_attached, cpuno); + } + + VPRINTF("%s: secondary processors hatched\n", __func__); +} + +bool +cpu_hatched_p(u_int cpuindex) +{ + const u_int off = cpuindex / CPUINDEX_DIVISOR; + const u_int bit = cpuindex % CPUINDEX_DIVISOR; + + membar_consumer(); + return (arm_cpu_hatched[off] & __BIT(bit)) != 0; +} + +void +cpu_set_hatched(int cpuindex) +{ + + const size_t off = cpuindex / CPUINDEX_DIVISOR; + const u_long bit = __BIT(cpuindex % CPUINDEX_DIVISOR); + + atomic_or_ulong(&arm_cpu_hatched[off], bit); +} + +void +cpu_clr_mbox(int cpuindex) +{ + + const size_t off = cpuindex / CPUINDEX_DIVISOR; + const u_long bit = __BIT(cpuindex % CPUINDEX_DIVISOR); + + /* Notify cpu_boot_secondary_processors that we're done */ + atomic_and_ulong(&arm_cpu_mbox[off], ~bit); + membar_producer(); + __asm __volatile("sev; sev; sev"); +} + +#endif diff --git a/sys/arch/arm/arm32/arm32_boot.c b/sys/arch/arm/arm32/arm32_boot.c index 07551c0157c0..878b11577fef 100644 --- a/sys/arch/arm/arm32/arm32_boot.c +++ b/sys/arch/arm/arm32/arm32_boot.c @@ -158,10 +158,6 @@ __KERNEL_RCSID(1, "$NetBSD: arm32_boot.c,v 1.36 2020/01/08 18:47:43 jmcneill Exp 
#define VPRINTF(...) __nothing #endif -#ifdef MULTIPROCESSOR -static kmutex_t cpu_hatch_lock; -#endif - vaddr_t initarm_common(vaddr_t kvm_base, vsize_t kvm_size, const struct boot_physmem *bp, size_t nbp) @@ -323,8 +319,6 @@ initarm_common(vaddr_t kvm_base, vsize_t kvm_size, #endif #ifdef MULTIPROCESSOR - mutex_init(&cpu_hatch_lock, MUTEX_DEFAULT, IPL_NONE); - /* * Ensure BP cache is flushed to memory so that APs start cache * coherency with correct view. @@ -354,9 +348,8 @@ cpu_hatch(struct cpu_info *ci, u_int cpuindex, void (*md_cpu_init)(struct cpu_in splhigh(); VPRINTF("%s(%s): ", __func__, cpu_name(ci)); + /* mpidr/midr filled in by armv7_mpcontinuation */ ci->ci_ctrl = armreg_sctlr_read(); - uint32_t mpidr = armreg_mpidr_read(); - ci->ci_mpidr = mpidr; ci->ci_arm_cpuid = cpu_idnum(); ci->ci_arm_cputype = ci->ci_arm_cpuid & CPU_ID_CPU_MASK; ci->ci_arm_cpurev = ci->ci_arm_cpuid & CPU_ID_REVISION_MASK; @@ -424,9 +417,6 @@ cpu_hatch(struct cpu_info *ci, u_int cpuindex, void (*md_cpu_init)(struct cpu_in VPRINTF(" done!\n"); - /* Notify cpu_boot_secondary_processors that we're done */ - atomic_and_32(&arm_cpu_mbox, ~__BIT(cpuindex)); - membar_producer(); - __asm __volatile("sev; sev; sev"); + cpu_clr_mbox(cpuindex); } #endif /* MULTIPROCESSOR */ diff --git a/sys/arch/arm/arm32/arm32_machdep.c b/sys/arch/arm/arm32/arm32_machdep.c index 2c9f9d3e93eb..88db3dcc1f2b 100644 --- a/sys/arch/arm/arm32/arm32_machdep.c +++ b/sys/arch/arm/arm32/arm32_machdep.c @@ -472,7 +472,7 @@ SYSCTL_SETUP(sysctl_machdep_setup, "sysctl machdep subtree setup") sysctl_createv(clog, 0, NULL, NULL, CTLFLAG_PERMANENT|CTLFLAG_READONLY, CTLTYPE_INT, "fpu_id", NULL, - NULL, 0, &cpu_info_store.ci_vfp_id, 0, + NULL, 0, &cpu_info_store[0].ci_vfp_id, 0, CTL_MACHDEP, CTL_CREATE, CTL_EOL); #endif sysctl_createv(clog, 0, NULL, NULL, @@ -735,7 +735,6 @@ void cpu_init_secondary_processor(int cpuindex) { // pmap_kernel has been successfully built and we can switch to it - cpu_domains(DOMAIN_DEFAULT); cpu_idcache_wbinv_all(); @@ -777,29 +776,15 @@ cpu_init_secondary_processor(int cpuindex) VPRINTS(")"); #endif - VPRINTS(" hatched="); - VPRINTX(arm_cpu_hatched | __BIT(cpuindex)); + VPRINTS(" hatched|="); + VPRINTX(__BIT(cpuindex)); VPRINTS("\n\r"); - atomic_or_uint(&arm_cpu_hatched, __BIT(cpuindex)); + cpu_set_hatched(cpuindex); /* return to assembly to wait for cpu_boot_secondary_processors */ } -void -cpu_boot_secondary_processors(void) -{ - VPRINTF("%s: writing mbox with %#x\n", __func__, arm_cpu_hatched); - arm_cpu_mbox = arm_cpu_hatched; - membar_producer(); -#ifdef _ARM_ARCH_7 - __asm __volatile("sev; sev; sev"); -#endif - while (membar_consumer(), arm_cpu_mbox) { - __asm __volatile("wfe" ::: "memory"); - } -} - void xc_send_ipi(struct cpu_info *ci) { diff --git a/sys/arch/arm/arm32/cpu.c b/sys/arch/arm/arm32/cpu.c index a0fec67aa186..e71578da44ae 100644 --- a/sys/arch/arm/arm32/cpu.c +++ b/sys/arch/arm/arm32/cpu.c @@ -54,6 +54,7 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.143 2020/01/22 12:23:12 skrll Exp $"); #include #include #include +#include #include #include @@ -65,14 +66,6 @@ __KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.143 2020/01/22 12:23:12 skrll Exp $"); extern const char *cpu_arch; #ifdef MULTIPROCESSOR -uint32_t cpu_mpidr[MAXCPUS] = { - [0 ... 
MAXCPUS - 1] = ~0, -}; - -volatile u_int arm_cpu_hatched __cacheline_aligned = 0; -volatile uint32_t arm_cpu_mbox __cacheline_aligned = 0; -u_int arm_cpu_max = 1; - #ifdef MPDEBUG uint32_t arm_cpu_marker[2] __cacheline_aligned = { 0, 0 }; #endif @@ -107,19 +100,23 @@ cpu_attach(device_t dv, cpuid_t id) ci->ci_arm_cpuid = cpu_idnum(); ci->ci_arm_cputype = ci->ci_arm_cpuid & CPU_ID_CPU_MASK; ci->ci_arm_cpurev = ci->ci_arm_cpuid & CPU_ID_REVISION_MASK; -#ifdef MULTIPROCESSOR - uint32_t mpidr = armreg_mpidr_read(); - ci->ci_mpidr = mpidr; -#endif } else { #ifdef MULTIPROCESSOR + if ((boothowto & RB_MD1) != 0) { + aprint_naive("\n"); + aprint_normal(": multiprocessor boot disabled\n"); + return; + } + + KASSERT(unit < MAXCPUS); + ci = &cpu_info_store[unit]; + KASSERT(cpu_info[unit] == NULL); - ci = kmem_zalloc(sizeof(*ci), KM_SLEEP); ci->ci_cpl = IPL_HIGH; ci->ci_cpuid = id; - ci->ci_data.cpu_cc_freq = cpu_info_store.ci_data.cpu_cc_freq; + ci->ci_data.cpu_cc_freq = cpu_info_store[0].ci_data.cpu_cc_freq; - ci->ci_undefsave[2] = cpu_info_store.ci_undefsave[2]; + ci->ci_undefsave[2] = cpu_info_store[0].ci_undefsave[2]; cpu_info[unit] = ci; if (cpu_hatched_p(unit) == false) { @@ -233,15 +230,6 @@ cpu_attach(device_t dv, cpuid_t id) vfp_attach(ci); /* XXX SMP */ } -#ifdef MULTIPROCESSOR -bool -cpu_hatched_p(u_int cpuindex) -{ - membar_consumer(); - return (arm_cpu_hatched & __BIT(cpuindex)) != 0; -} -#endif - enum cpu_class { CPU_CLASS_NONE, CPU_CLASS_ARM2, diff --git a/sys/arch/arm/arm32/genassym.cf b/sys/arch/arm/arm32/genassym.cf index afcbd20c00d2..c9265580fdaa 100644 --- a/sys/arch/arm/arm32/genassym.cf +++ b/sys/arch/arm/arm32/genassym.cf @@ -219,11 +219,15 @@ define CF_CONTEXT_SWITCH offsetof(struct cpu_functions, cf_context_switch) define CF_SLEEP offsetof(struct cpu_functions, cf_sleep) define CF_CONTROL offsetof(struct cpu_functions, cf_control) +define CPU_INFO_SIZE sizeof(struct cpu_info) define CI_ARM_CPUID offsetof(struct cpu_info, ci_arm_cpuid) +define CI_ASTPENDING offsetof(struct cpu_info, ci_astpending) define CI_CURLWP offsetof(struct cpu_info, ci_curlwp) +define CI_CURPRIORITY offsetof(struct cpu_info, ci_schedstate.spc_curpriority) define CI_CPL offsetof(struct cpu_info, ci_cpl) -define CI_ASTPENDING offsetof(struct cpu_info, ci_astpending) define CI_INTR_DEPTH offsetof(struct cpu_info, ci_intr_depth) +define CI_MIDR offsetof(struct cpu_info, ci_midr) +define CI_MPIDR offsetof(struct cpu_info, ci_mpidr) define CI_MTX_COUNT offsetof(struct cpu_info, ci_mtx_count) define CI_UNDEFSAVE offsetof(struct cpu_info, ci_undefsave[0]) if defined(EXEC_AOUT) diff --git a/sys/arch/arm/broadcom/bcm2835_intr.c b/sys/arch/arm/broadcom/bcm2835_intr.c index 60c5dfd323d3..fc5dc6146dc4 100644 --- a/sys/arch/arm/broadcom/bcm2835_intr.c +++ b/sys/arch/arm/broadcom/bcm2835_intr.c @@ -337,7 +337,7 @@ bcm2835_irq_handler(void *frame) { struct cpu_info * const ci = curcpu(); const int oldipl = ci->ci_cpl; - const cpuid_t cpuid = __SHIFTOUT(arm_cpu_mpidr(ci), MPIDR_AFF0); + const cpuid_t cpuid = ci->ci_core_id; const uint32_t oldipl_mask = __BIT(oldipl); int ipl_mask = 0; @@ -700,7 +700,7 @@ static int bcm2836mp_pic_find_pending_irqs(struct pic_softc *pic) { struct cpu_info * const ci = curcpu(); - const cpuid_t cpuid = __SHIFTOUT(arm_cpu_mpidr(ci), MPIDR_AFF0); + const cpuid_t cpuid = ci->ci_core_id; uint32_t lpending; int ipl = 0; @@ -741,7 +741,7 @@ bcm2836mp_pic_source_name(struct pic_softc *pic, int irq, char *buf, size_t len) #if defined(MULTIPROCESSOR) static void bcm2836mp_cpu_init(struct pic_softc 
*pic, struct cpu_info *ci) { - const cpuid_t cpuid = __SHIFTOUT(arm_cpu_mpidr(ci), MPIDR_AFF0); + const cpuid_t cpuid = ci->ci_core_id; KASSERT(cpuid < BCM2836_NCPUS); @@ -768,7 +768,7 @@ int bcm2836mp_ipi_handler(void *priv) { const struct cpu_info *ci = curcpu(); - const cpuid_t cpuid = __SHIFTOUT(arm_cpu_mpidr(ci), MPIDR_AFF0); + const cpuid_t cpuid = ci->ci_core_id; uint32_t ipimask, bit; KASSERT(cpuid < BCM2836_NCPUS); @@ -817,7 +817,7 @@ bcm2836mp_ipi_handler(void *priv) static void bcm2836mp_intr_init(void *priv, struct cpu_info *ci) { - const cpuid_t cpuid = __SHIFTOUT(arm_cpu_mpidr(ci), MPIDR_AFF0); + const cpuid_t cpuid = ci->ci_core_id; struct pic_softc * const pic = &bcm2836mp_pic[cpuid]; KASSERT(cpuid < BCM2836_NCPUS); diff --git a/sys/arch/arm/conf/files.arm b/sys/arch/arm/conf/files.arm index 1a8b642c0ad1..1474c2712859 100644 --- a/sys/arch/arm/conf/files.arm +++ b/sys/arch/arm/conf/files.arm @@ -200,6 +200,7 @@ file arch/arm/arm/cpufunc_asm_ixp12x0.S cpu_ixp12x0 file arch/arm/arm/cpufunc_asm_sheeva.S cpu_sheeva file arch/arm/arm/cpu_exec.c file arch/arm/arm/cpu_in_cksum.S (inet | inet6) & cpu_in_cksum +file arch/arm/arm/cpu_subr.c file arch/arm/arm/fusu.S file arch/arm/arm/idle_machdep.c file arch/arm/arm/lock_cas.S diff --git a/sys/arch/arm/include/arm32/machdep.h b/sys/arch/arm/include/arm32/machdep.h index eae1c64fa6bf..1896ca4b018d 100644 --- a/sys/arch/arm/include/arm32/machdep.h +++ b/sys/arch/arm/include/arm32/machdep.h @@ -48,13 +48,8 @@ struct bootmem_info { extern struct bootmem_info bootmem_info; extern char *booted_kernel; - -extern volatile uint32_t arm_cpu_hatched; -extern volatile uint32_t arm_cpu_mbox; -extern u_int arm_cpu_max; extern u_long kern_vtopdiff; - /* misc prototypes used by the many arm machdeps */ void cortex_pmc_ccnt_init(void); void cpu_hatch(struct cpu_info *, u_int, void (*)(struct cpu_info *)); diff --git a/sys/arch/arm/include/cpu.h b/sys/arch/arm/include/cpu.h index 56edeb643d98..a710c37fca59 100644 --- a/sys/arch/arm/include/cpu.h +++ b/sys/arch/arm/include/cpu.h @@ -48,6 +48,28 @@ #ifndef _ARM_CPU_H_ #define _ARM_CPU_H_ +typedef unsigned long mpidr_t; + +#ifdef MULTIPROCESSOR +extern u_int arm_cpu_max; +extern mpidr_t cpu_mpidr[]; +extern kmutex_t cpu_hatch_lock; + +void cpu_boot_secondary_processors(void); +void cpu_mpstart(void); +bool cpu_hatched_p(u_int); + +void cpu_clr_mbox(int); +void cpu_set_hatched(int); + +/* + * cpu device glue (belongs in cpuvar.h) + */ +void cpu_attach(device_t, cpuid_t); +#endif + +void cpu_proc_fork(struct proc *, struct proc *); + #ifdef __arm__ /* @@ -178,7 +200,6 @@ struct cpu_info { uint32_t ci_midr; uint32_t ci_mpidr; -#define arm_cpu_mpidr(ci) ((ci)->ci_mpidr) uint32_t ci_capacity_dmips_mhz; struct arm_cache_info * @@ -189,7 +210,7 @@ struct cpu_info { #endif }; -extern struct cpu_info cpu_info_store; +extern struct cpu_info cpu_info_store[]; struct lwp *arm_curlwp(void); struct cpu_info *arm_curcpu(void); @@ -228,7 +249,7 @@ curcpu(void) return (struct cpu_info *) armreg_tpidrprw_read(); } #elif !defined(MULTIPROCESSOR) -#define curcpu() (&cpu_info_store) +#define curcpu() (&cpu_info_store[0]) #elif !defined(__HAVE_PREEMPTION) #error MULTIPROCESSOR && !__HAVE_PREEMPTION requires TPIDRPRW_IS_CURCPU or TPIDRPRW_IS_CURLWP #else @@ -255,16 +276,10 @@ extern struct cpu_info *cpu_info[]; #endif #if defined(MULTIPROCESSOR) - -extern uint32_t cpu_mpidr[]; -bool cpu_hatched_p(u_int); - -void cpu_mpstart(void); void cpu_init_secondary_processor(int); -void cpu_boot_secondary_processors(void); #endif -#define 
LWP0_CPU_INFO (&cpu_info_store) +#define LWP0_CPU_INFO (&cpu_info_store[0]) static inline int curcpl(void) @@ -291,8 +306,6 @@ cpu_dosoftints(void) #endif } -void cpu_proc_fork(struct proc *, struct proc *); - /* * Scheduling glue */ @@ -324,11 +337,6 @@ void cpu_proc_fork(struct proc *, struct proc *); */ vaddr_t cpu_uarea_alloc_idlelwp(struct cpu_info *); -/* - * cpu device glue (belongs in cpuvar.h) - */ -void cpu_attach(device_t, cpuid_t); - #ifdef _ARM_ARCH_6 int cpu_maxproc_hook(int); #endif diff --git a/sys/arch/arm/include/cpu_topology.h b/sys/arch/arm/include/cpu_topology.h index 268bbb9ad993..77c6c0856a81 100644 --- a/sys/arch/arm/include/cpu_topology.h +++ b/sys/arch/arm/include/cpu_topology.h @@ -36,24 +36,23 @@ * * arm_cpu_do_topology(cpuinfo) * - * Call arm_cpu_do_topology() in cpu_attach() after making sure that - * arm_cpu_mpidr() will work for this CPU, and it will split up - * package/core/smt IDs. + * Call arm_cpu_do_topology() in cpu_attach(). It will update the topology + * view of relative speed. * - * The CPU frontend can set the "capacity_dmips_mhz" property for - * this CPU device, and arm_cpu_set_topology() will calculate the - * best way to call cpu_topology_set() for the known system. + * The CPU frontend can set the "capacity_dmips_mhz" property for this CPU, + * and arm_cpu_do_topology() will update the system view of this and other + * CPUs relative speeds * - * arm_cpu_topology_set(cpuinfo, mpidr, slow) + * arm_cpu_topology_set(cpuinfo, mpidr) * * arm_cpu_topology_set() is provided for locore and the boot CPU, - * and only works for the current CPU. + * and application CPUs. It updates data for the current CPU. */ #include #include void arm_cpu_do_topology(struct cpu_info * const); -void arm_cpu_topology_set(struct cpu_info * const, uint64_t, bool); +void arm_cpu_topology_set(struct cpu_info * const, mpidr_t); #endif /* _ARM_CPU_TOPOLOGY_H_ */ diff --git a/sys/arch/arm/include/locore.h b/sys/arch/arm/include/locore.h index 059c2665d8d8..494640984528 100644 --- a/sys/arch/arm/include/locore.h +++ b/sys/arch/arm/include/locore.h @@ -167,10 +167,6 @@ extern int cpu_media_and_vfp_features[2]; extern bool arm_has_tlbiasid_p; extern bool arm_has_mpext_p; -#ifdef MULTIPROCESSOR -extern u_int arm_cpu_max; -extern volatile u_int arm_cpu_hatched; -#endif #if !defined(CPU_ARMV7) #define CPU_IS_ARMV7_P() false diff --git a/sys/arch/arm/mainbus/cpu_mainbus.c b/sys/arch/arm/mainbus/cpu_mainbus.c index fdcd15a06d7f..23b22b3569c6 100644 --- a/sys/arch/arm/mainbus/cpu_mainbus.c +++ b/sys/arch/arm/mainbus/cpu_mainbus.c @@ -67,9 +67,7 @@ static void cpu_mainbus_attach(device_t, device_t, void *); * Probe for the main cpu. Currently all this does is return 1 to * indicate that the cpu was found. 
*/ -#ifdef MULTIPROCESSOR -extern u_int arm_cpu_max; -#else +#ifndef MULTIPROCESSOR #define arm_cpu_max 1 #endif diff --git a/sys/arch/arm/nvidia/soc_tegra124.c b/sys/arch/arm/nvidia/soc_tegra124.c index 0efc9600535d..213e23c63fbe 100644 --- a/sys/arch/arm/nvidia/soc_tegra124.c +++ b/sys/arch/arm/nvidia/soc_tegra124.c @@ -70,21 +70,28 @@ tegra124_mpstart(void) (uint32_t)KERN_VTOPHYS((vaddr_t)cpu_mpstart)); bus_space_barrier(bst, bsh, EVP_RESET_VECTOR_0_REG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); - uint32_t started = 0; - tegra_pmc_power(PMC_PARTID_CPU1, true); started |= __BIT(1); - tegra_pmc_power(PMC_PARTID_CPU2, true); started |= __BIT(2); - tegra_pmc_power(PMC_PARTID_CPU3, true); started |= __BIT(3); - - u_int i; - for (i = 0x10000000; i > 0; i--) { - arm_dmb(); - if (arm_cpu_hatched == started) - break; - } - if (i == 0) { - ret++; - aprint_error("cpu%d: WARNING: AP failed to start\n", i); + for (u_int cpuindex = 1; cpuindex < arm_cpu_max; cpuindex++) { + static u_int tegra_cpu_pmu[] = { + 0, + PMC_PARTID_CPU1, + PMC_PARTID_CPU2, + PMC_PARTID_CPU3 + }; + + tegra_pmc_power(tegra_cpu_pmu[cpuindex], true); + + u_int i; + for (i = 0x10000000; i > 0; i--) { + if (cpu_hatched_p(cpuindex)) + break; + } + + if (i == 0) { + ret++; + aprint_error("cpu%d: WARNING: AP failed to start\n", + cpuindex); + } } #endif return ret; diff --git a/sys/arch/arm/samsung/exynos_platform.c b/sys/arch/arm/samsung/exynos_platform.c index d020c80066a5..0db308df7fa6 100644 --- a/sys/arch/arm/samsung/exynos_platform.c +++ b/sys/arch/arm/samsung/exynos_platform.c @@ -168,7 +168,7 @@ exynos5800_mpstart(void) /* Wait for AP to start */ for (n = 0x100000; n > 0; n--) { membar_consumer(); - if (arm_cpu_hatched & __BIT(cpuindex)) + if (cpu_hatched_p(cpuindex)) break; } if (n == 0) { diff --git a/sys/arch/arm/vexpress/vexpress_platform.c b/sys/arch/arm/vexpress/vexpress_platform.c index 9cde03b02d01..80bc32345e53 100644 --- a/sys/arch/arm/vexpress/vexpress_platform.c +++ b/sys/arch/arm/vexpress/vexpress_platform.c @@ -120,11 +120,6 @@ vexpress_a15_smp_init(void) #ifdef MULTIPROCESSOR bus_space_tag_t gicd_bst = &armv7_generic_bs_tag; bus_space_handle_t gicd_bsh; - int started = 0; - - /* Bitmask of CPUs (non-BSP) to start */ - for (int i = 1; i < arm_cpu_max; i++) - started |= __BIT(i); /* Write init vec to SYS_FLAGS register */ SYSREG_WRITE(SYS_FLAGSCLR, 0xffffffff); @@ -142,16 +137,19 @@ vexpress_a15_smp_init(void) const uint32_t sgir = GICD_SGIR_TargetListFilter_NotMe; bus_space_write_4(gicd_bst, gicd_bsh, GICD_SGIR, sgir); - /* Wait for APs to start */ - u_int i; - for (i = 0x10000000; i > 0; i--) { - arm_dmb(); - if (arm_cpu_hatched == started) - break; - } - if (i == 0) { - aprint_error("WARNING: AP failed to start\n"); - ret++; + /* Wait for the APs (non-BSP CPUs) to start */ + for (u_int cpuindex = 1; cpuindex < arm_cpu_max; cpuindex++) { + u_int i; + for (i = 0x10000000; i > 0; i--) { + if (cpu_hatched_p(cpuindex)) + break; + } + + if (i == 0) { + ret++; + aprint_error("cpu%d: WARNING: AP failed to start\n", + cpuindex); + } } /* Disable GIC distributor */ diff --git a/sys/arch/evbarm/bcm53xx/bcm53xx_machdep.c b/sys/arch/evbarm/bcm53xx/bcm53xx_machdep.c index f9e56561c934..2d23c877e28a 100644 --- a/sys/arch/evbarm/bcm53xx/bcm53xx_machdep.c +++ b/sys/arch/evbarm/bcm53xx/bcm53xx_machdep.c @@ -255,7 +255,7 @@ bcm53xx_mpstart(void) break; } for (size_t i = 1; i < arm_cpu_max; i++) { - if ((arm_cpu_hatched & __BIT(i)) == 0) { + if (!cpu_hatched_p(i)) { printf("%s: warning: cpu%zu failed to hatch\n", __func__,
i); } diff --git a/sys/arch/evbarm/beagle/beagle_machdep.c b/sys/arch/evbarm/beagle/beagle_machdep.c index eb81f4ffee9f..f6fd3c4bf67c 100644 --- a/sys/arch/evbarm/beagle/beagle_machdep.c +++ b/sys/arch/evbarm/beagle/beagle_machdep.c @@ -497,7 +497,7 @@ beagle_mpstart(void) break; } for (size_t i = 1; i < arm_cpu_max; i++) { - if ((arm_cpu_hatched & __BIT(i)) == 0) { + if (!cpu_hatched_p(i)) { printf("%s: warning: cpu%zu failed to hatch\n", __func__, i); } diff --git a/sys/arch/evbarm/gumstix/gumstix_machdep.c b/sys/arch/evbarm/gumstix/gumstix_machdep.c index 704caace3dfb..d47c873c13e3 100644 --- a/sys/arch/evbarm/gumstix/gumstix_machdep.c +++ b/sys/arch/evbarm/gumstix/gumstix_machdep.c @@ -545,7 +545,7 @@ gumstix_mpstart(void) break; } for (size_t i = 1; i < arm_cpu_max; i++) { - if ((arm_cpu_hatched & __BIT(i)) == 0) { + if (!cpu_hatched_p(i)) { printf("%s: warning: cpu%zu failed to hatch\n", __func__, i); } diff --git a/sys/arch/evbarm/imx7/imx7_machdep.c b/sys/arch/evbarm/imx7/imx7_machdep.c index 50bdaef9f0c3..aa8e8f413b3e 100644 --- a/sys/arch/evbarm/imx7/imx7_machdep.c +++ b/sys/arch/evbarm/imx7/imx7_machdep.c @@ -219,7 +219,7 @@ imx7_mpstart(void) break; } for (size_t i = 1; i < arm_cpu_max; i++) { - if ((arm_cpu_hatched & __BIT(i)) == 0) { + if (!cpu_hatched_p(i)) { printf("%s: warning: cpu%zu failed to hatch\n", __func__, i); } diff --git a/sys/arch/evbarm/nitrogen6/nitrogen6_machdep.c b/sys/arch/evbarm/nitrogen6/nitrogen6_machdep.c index 37a928a1cdb5..7a6092a96e50 100644 --- a/sys/arch/evbarm/nitrogen6/nitrogen6_machdep.c +++ b/sys/arch/evbarm/nitrogen6/nitrogen6_machdep.c @@ -246,7 +246,7 @@ nitrogen6_mpstart(void) break; } for (size_t i = 1; i < arm_cpu_max; i++) { - if ((arm_cpu_hatched & __BIT(i)) == 0) { + if (!cpu_hatched_p(i)) { printf("%s: warning: cpu%zu failed to hatch\n", __func__, i); } diff --git a/sys/arch/evbarm/zynq/zynq_machdep.c b/sys/arch/evbarm/zynq/zynq_machdep.c index 6ee3a5008852..27d11e785ac3 100644 --- a/sys/arch/evbarm/zynq/zynq_machdep.c +++ b/sys/arch/evbarm/zynq/zynq_machdep.c @@ -234,7 +234,7 @@ zynq_mpstart(void) break; } for (size_t i = 1; i < arm_cpu_max; i++) { - if ((arm_cpu_hatched & __BIT(i)) == 0) { + if (!cpu_hatched_p(i)) { ret++; printf("%s: warning: cpu%zu failed to hatch\n", __func__, i); diff --git a/sys/arch/macppc/macppc/cpu.c b/sys/arch/macppc/macppc/cpu.c index 3f7ec7f2e7b9..eff599404bf2 100644 --- a/sys/arch/macppc/macppc/cpu.c +++ b/sys/arch/macppc/macppc/cpu.c @@ -175,7 +175,7 @@ cpuattach(device_t parent, device_t self, void *aux) core = package & 1; package >>= 1; } - cpu_topology_set(ci, package, core, 0, 0, false); + cpu_topology_set(ci, package, core, 0, 0); if (ci->ci_khz == 0) { cpu_OFgetspeed(self, ci); diff --git a/sys/arch/mips/mips/cpu_subr.c b/sys/arch/mips/mips/cpu_subr.c index 1c474d47ff72..afb53385392e 100644 --- a/sys/arch/mips/mips/cpu_subr.c +++ b/sys/arch/mips/mips/cpu_subr.c @@ -189,7 +189,7 @@ cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id, ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip; ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count; - cpu_topology_set(ci, cpu_package_id, cpu_core_id, cpu_smt_id, 0, false); + cpu_topology_set(ci, cpu_package_id, cpu_core_id, cpu_smt_id, 0); pmap_md_alloc_ephemeral_address_space(ci); diff --git a/sys/arch/x86/x86/cpu_topology.c b/sys/arch/x86/x86/cpu_topology.c index f73b7f65052c..3b4e7ae8fc51 100644 --- a/sys/arch/x86/x86/cpu_topology.c +++ b/sys/arch/x86/x86/cpu_topology.c @@ -95,14 +95,14 @@ x86_cpu_topology(struct cpu_info
*ci) case CPUVENDOR_INTEL: if (cpu_family < 6) { cpu_topology_set(ci, package_id, core_id, smt_id, - numa_id, false); + numa_id); return; } break; case CPUVENDOR_AMD: if (cpu_family < 0xf) { cpu_topology_set(ci, package_id, core_id, smt_id, - numa_id, false); + numa_id); return; } break; @@ -211,5 +211,5 @@ x86_cpu_topology(struct cpu_info *ci) smt_id = __SHIFTOUT(apic_id, smt_mask); } - cpu_topology_set(ci, package_id, core_id, smt_id, numa_id, false); + cpu_topology_set(ci, package_id, core_id, smt_id, numa_id); } diff --git a/sys/kern/subr_cpu.c b/sys/kern/subr_cpu.c index 24ed29c0a27d..a54e7e125a0d 100644 --- a/sys/kern/subr_cpu.c +++ b/sys/kern/subr_cpu.c @@ -142,23 +142,33 @@ cpu_softintr_p(void) */ void cpu_topology_set(struct cpu_info *ci, u_int package_id, u_int core_id, - u_int smt_id, u_int numa_id, bool slow) + u_int smt_id, u_int numa_id) { enum cpu_rel rel; cpu_topology_present = true; - cpu_topology_haveslow |= slow; ci->ci_package_id = package_id; ci->ci_core_id = core_id; ci->ci_smt_id = smt_id; ci->ci_numa_id = numa_id; - ci->ci_is_slow = slow; + ci->ci_is_slow = false; for (rel = 0; rel < __arraycount(ci->ci_sibling); rel++) { ci->ci_sibling[rel] = ci; ci->ci_nsibling[rel] = 1; } } +/* + * Collect CPU relative speed + */ +void +cpu_topology_setspeed(struct cpu_info *ci, bool slow) +{ + + cpu_topology_haveslow |= slow; + ci->ci_is_slow = slow; +} + /* * Link a CPU into the given circular list. */ diff --git a/sys/sys/cpu.h b/sys/sys/cpu.h index bcd28724b4a0..65dd8a496142 100644 --- a/sys/sys/cpu.h +++ b/sys/sys/cpu.h @@ -90,7 +90,8 @@ bool cpu_kpreempt_disabled(void); int cpu_lwp_setprivate(struct lwp *, void *); void cpu_intr_redistribute(void); u_int cpu_intr_count(struct cpu_info *); -void cpu_topology_set(struct cpu_info *, u_int, u_int, u_int, u_int, bool); +void cpu_topology_set(struct cpu_info *, u_int, u_int, u_int, u_int); +void cpu_topology_setspeed(struct cpu_info *, bool); void cpu_topology_init(void); #endif
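
The subr_cpu.c and sys/cpu.h hunks above remove the bool argument from cpu_topology_set() and move the relative-speed bookkeeping into the new cpu_topology_setspeed(). The following is a minimal usage sketch of the split API, not part of the patch; the wrapper name example_set_topology() is hypothetical and only illustrates the calling sequence.

#include <sys/types.h>
#include <sys/cpu.h>

/*
 * Hypothetical helper: record a CPU's place in the topology, then
 * report its relative speed separately, as callers do after this change.
 */
static void
example_set_topology(struct cpu_info *ci, u_int package_id, u_int core_id,
    u_int smt_id, u_int numa_id, bool slow)
{
	/* Position in the package/core/SMT/NUMA hierarchy. */
	cpu_topology_set(ci, package_id, core_id, smt_id, numa_id);

	/* Relative speed, e.g. a little core on a big.LITTLE system. */
	cpu_topology_setspeed(ci, slow);
}

Splitting the two calls lets arm_cpu_do_topology() refresh the speed view of already-attached CPUs when a higher-capacity CPU attaches later, without re-deriving their package/core/SMT IDs from the MPIDR.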