arch_topology: move parse_acpi_topology() to common code

Currently, RISC-V has no architecture-specific registers that describe CPU
topology and must obtain those properties from ACPI. Move
parse_acpi_topology() from arm64/ to drivers/ so that RISC-V can reuse it.

Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com>
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Link: https://patch.msgid.link/20250923015409.15983-2-cuiyunhui@bytedance.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 4 files changed, 103 insertions(+), 102 deletions(-)
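For context, the move relies on two long-standing kernel conventions visible in the hunks below: parse_acpi_topology() becomes a __weak default that an architecture can still override at link time, and arch_cpu_is_threaded() gains an #ifndef'd fallback that an architecture header (arm64's, in this series) may predefine. A minimal stand-alone sketch of those two patterns, using made-up names (parse_topology, arch_is_threaded) rather than the kernel symbols, buildable with GCC or Clang on an ELF target:

#include <stdio.h>

/* Fallback macro: used only if nothing defined it earlier, mirroring the
 * #ifndef arch_cpu_is_threaded default added to the generic header. */
#ifndef arch_is_threaded
#define arch_is_threaded() (0)
#endif

/* Weak default: a non-weak definition of the same symbol in another object
 * file silently takes precedence at link time, no #ifdef needed. */
__attribute__((weak)) int parse_topology(void)
{
	printf("generic parser, threaded fallback = %d\n", arch_is_threaded());
	return 0;
}

int main(void)
{
	return parse_topology();
}

Linking in another translation unit that provides a non-weak parse_topology() would override the generic body, which is how an architecture could keep a private parser while everyone else reuses the common one.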


@@ -36,6 +36,9 @@ void update_freq_counters_refs(void);
#define arch_scale_hw_pressure topology_get_hw_pressure
#define arch_update_hw_pressure topology_update_hw_pressure
#undef arch_cpu_is_threaded
#define arch_cpu_is_threaded() (read_cpuid_mpidr() & MPIDR_MT_BITMASK)
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */


@@ -25,107 +25,6 @@
#include <asm/cputype.h>
#include <asm/topology.h>
#ifdef CONFIG_ACPI
static bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * if the PPTT doesn't have thread information, assume a homogeneous
	 * machine and return the current CPU's thread state.
	 */
	if (is_threaded < 0)
		is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

	return !!is_threaded;
}

struct cpu_smt_info {
	unsigned int thread_num;
	int core_id;
};

/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
int __init parse_acpi_topology(void)
{
	unsigned int max_smt_thread_num = 1;
	struct cpu_smt_info *entry;
	struct xarray hetero_cpu;
	unsigned long hetero_id;
	int cpu, topology_id;

	if (acpi_disabled)
		return 0;

	xa_init(&hetero_cpu);

	for_each_possible_cpu(cpu) {
		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (acpi_cpu_is_threaded(cpu)) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id = topology_id;

			/*
			 * In the PPTT, CPUs below a node with the 'identical
			 * implementation' flag have the same number of threads.
			 * Count the number of threads for only one CPU (i.e.
			 * one core_id) among those with the same hetero_id.
			 * See the comment of find_acpi_cpu_topology_hetero_id()
			 * for more details.
			 *
			 * One entry is created for each node having:
			 * - the 'identical implementation' flag
			 * - its parent not having the flag
			 */
			hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
			entry = xa_load(&hetero_cpu, hetero_id);
			if (!entry) {
				entry = kzalloc(sizeof(*entry), GFP_KERNEL);
				WARN_ON_ONCE(!entry);
				if (entry) {
					entry->core_id = topology_id;
					entry->thread_num = 1;
					xa_store(&hetero_cpu, hetero_id,
						 entry, GFP_KERNEL);
				}
			} else if (entry->core_id == topology_id) {
				entry->thread_num++;
			}
		} else {
			cpu_topology[cpu].thread_id = -1;
			cpu_topology[cpu].core_id = topology_id;
		}

		topology_id = find_acpi_cpu_topology_cluster(cpu);
		cpu_topology[cpu].cluster_id = topology_id;
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;
	}

	/*
	 * This is a short loop since the number of XArray elements is the
	 * number of heterogeneous CPU clusters. On a homogeneous system
	 * there's only one entry in the XArray.
	 */
	xa_for_each(&hetero_cpu, hetero_id, entry) {
		max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
		xa_erase(&hetero_cpu, hetero_id);
		kfree(entry);
	}

	cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);

	xa_destroy(&hetero_cpu);
	return 0;
}
#endif
#ifdef CONFIG_ARM64_AMU_EXTN
#define read_corecnt() read_sysreg_s(SYS_AMEVCNTR0_CORE_EL0)
#define read_constcnt() read_sysreg_s(SYS_AMEVCNTR0_CONST_EL0)


@@ -823,12 +823,106 @@ void remove_cpu_topology(unsigned int cpu)
	clear_cpu_topology(cpu);
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
struct cpu_smt_info {
	unsigned int thread_num;
	int core_id;
};

static bool __init acpi_cpu_is_threaded(int cpu)
{
	int is_threaded = acpi_pptt_cpu_is_thread(cpu);

	/*
	 * if the PPTT doesn't have thread information, check for architecture
	 * specific fallback if available
	 */
	if (is_threaded < 0)
		is_threaded = arch_cpu_is_threaded();

	return !!is_threaded;
}

/*
 * Propagate the topology information of the processor_topology_node tree to the
 * cpu_topology array.
 */
__weak int __init parse_acpi_topology(void)
{
	unsigned int max_smt_thread_num = 1;
	struct cpu_smt_info *entry;
	struct xarray hetero_cpu;
	unsigned long hetero_id;
	int cpu, topology_id;

	if (acpi_disabled)
		return 0;

	xa_init(&hetero_cpu);

	for_each_possible_cpu(cpu) {
		topology_id = find_acpi_cpu_topology(cpu, 0);
		if (topology_id < 0)
			return topology_id;

		if (acpi_cpu_is_threaded(cpu)) {
			cpu_topology[cpu].thread_id = topology_id;
			topology_id = find_acpi_cpu_topology(cpu, 1);
			cpu_topology[cpu].core_id = topology_id;

			/*
			 * In the PPTT, CPUs below a node with the 'identical
			 * implementation' flag have the same number of threads.
			 * Count the number of threads for only one CPU (i.e.
			 * one core_id) among those with the same hetero_id.
			 * See the comment of find_acpi_cpu_topology_hetero_id()
			 * for more details.
			 *
			 * One entry is created for each node having:
			 * - the 'identical implementation' flag
			 * - its parent not having the flag
			 */
			hetero_id = find_acpi_cpu_topology_hetero_id(cpu);
			entry = xa_load(&hetero_cpu, hetero_id);
			if (!entry) {
				entry = kzalloc(sizeof(*entry), GFP_KERNEL);
				WARN_ON_ONCE(!entry);
				if (entry) {
					entry->core_id = topology_id;
					entry->thread_num = 1;
					xa_store(&hetero_cpu, hetero_id,
						 entry, GFP_KERNEL);
				}
			} else if (entry->core_id == topology_id) {
				entry->thread_num++;
			}
		} else {
			cpu_topology[cpu].thread_id = -1;
			cpu_topology[cpu].core_id = topology_id;
		}

		topology_id = find_acpi_cpu_topology_cluster(cpu);
		cpu_topology[cpu].cluster_id = topology_id;
		topology_id = find_acpi_cpu_topology_package(cpu);
		cpu_topology[cpu].package_id = topology_id;
	}

	/*
	 * This is a short loop since the number of XArray elements is the
	 * number of heterogeneous CPU clusters. On a homogeneous system
	 * there's only one entry in the XArray.
	 */
	xa_for_each(&hetero_cpu, hetero_id, entry) {
		max_smt_thread_num = max(max_smt_thread_num, entry->thread_num);
		xa_erase(&hetero_cpu, hetero_id);
		kfree(entry);
	}

	cpu_smt_set_num_threads(max_smt_thread_num, max_smt_thread_num);

	xa_destroy(&hetero_cpu);
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	int cpu, ret;


@@ -80,6 +80,11 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
#define topology_cluster_cpumask(cpu) (&cpu_topology[cpu].cluster_sibling)
#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling)
#ifndef arch_cpu_is_threaded
#define arch_cpu_is_threaded() (0)
#endif
void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
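As an aside on the thread-counting scheme in parse_acpi_topology() above: threads are counted for only one core per 'identical implementation' group, and the maximum across groups is handed to cpu_smt_set_num_threads(). A stand-alone illustration of the same idea with plain arrays instead of the kernel's XArray and ACPI PPTT helpers (the sample data and all names here are hypothetical):

#include <stdio.h>

/* Per-CPU sample data: the 'identical implementation' group and the core a
 * CPU belongs to. In the kernel these come from the ACPI PPTT helpers. */
struct cpu { int hetero_id; int core_id; };

static const struct cpu cpus[] = {
	{ .hetero_id = 0, .core_id = 10 }, { .hetero_id = 0, .core_id = 10 },	/* SMT2 cores */
	{ .hetero_id = 0, .core_id = 11 }, { .hetero_id = 0, .core_id = 11 },
	{ .hetero_id = 1, .core_id = 20 },					/* single-thread core */
};

int main(void)
{
	/* Count threads only for the first core_id seen in each group: all
	 * cores below an 'identical implementation' node look the same. */
	int first_core[2] = { -1, -1 };
	unsigned int thread_num[2] = { 0, 0 };
	unsigned int max_smt_thread_num = 1;

	for (size_t i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
		int h = cpus[i].hetero_id;

		if (first_core[h] < 0) {
			first_core[h] = cpus[i].core_id;
			thread_num[h] = 1;
		} else if (first_core[h] == cpus[i].core_id) {
			thread_num[h]++;
		}
	}

	for (int h = 0; h < 2; h++)
		if (thread_num[h] > max_smt_thread_num)
			max_smt_thread_num = thread_num[h];

	printf("max SMT threads per core: %u\n", max_smt_thread_num);	/* 2 */
	return 0;
}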