Revert "sched/topology: Consolidate and clean up access to a CPU's max compute capacity"

This reverts commit 7fc781ca93, which is
commit 7bc263840bc3377186cb06b003ac287bb2f18ce2 upstream.

It breaks the Android kernel ABI and can be brought back in the future
in an ABI-safe way if it is really needed.

Bug: 161946584
Change-Id: I2db630d00cd4c52eac8d7371a99b415b40fd252a
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>

diff --git a/Documentation/scheduler/sched-capacity.rst b/Documentation/scheduler/sched-capacity.rst
--- a/Documentation/scheduler/sched-capacity.rst
+++ b/Documentation/scheduler/sched-capacity.rst
@@ -39,15 +39,14 @@ per Hz, leading to::
 -------------------
 
 Two different capacity values are used within the scheduler. A CPU's
-``original capacity`` is its maximum attainable capacity, i.e. its maximum
-attainable performance level. This original capacity is returned by
-the function arch_scale_cpu_capacity(). A CPU's ``capacity`` is its ``original
-capacity`` to which some loss of available performance (e.g. time spent
-handling IRQs) is subtracted.
+``capacity_orig`` is its maximum attainable capacity, i.e. its maximum
+attainable performance level. A CPU's ``capacity`` is its ``capacity_orig`` to
+which some loss of available performance (e.g. time spent handling IRQs) is
+subtracted.
 
 Note that a CPU's ``capacity`` is solely intended to be used by the CFS class,
-while ``original capacity`` is class-agnostic. The rest of this document will use
-the term ``capacity`` interchangeably with ``original capacity`` for the sake of
+while ``capacity_orig`` is class-agnostic. The rest of this document will use
+the term ``capacity`` interchangeably with ``capacity_orig`` for the sake of
 brevity.
 
 1.3 Platform examples
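
As a rough standalone illustration of the relationship the restored wording
describes (plain userspace C, not kernel code; the 5% IRQ figure is an
arbitrary example): a CPU's ``capacity`` is its ``capacity_orig`` minus
whatever performance is currently lost, e.g. to time spent handling IRQs.

	#include <stdio.h>

	int main(void)
	{
		unsigned long capacity_orig = 1024;	/* SCHED_CAPACITY_SCALE */
		unsigned long irq_pct = 5;		/* assume 5% of time lost to IRQs */
		unsigned long capacity = capacity_orig - capacity_orig * irq_pct / 100;

		/* prints: capacity_orig=1024 capacity=973 */
		printf("capacity_orig=%lu capacity=%lu\n", capacity_orig, capacity);
		return 0;
	}
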
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10269,7 +10269,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SMP
 		rq->sd = NULL;
 		rq->rd = NULL;
-		rq->cpu_capacity = SCHED_CAPACITY_SCALE;
+		rq->cpu_capacity = rq->cpu_capacity_orig = SCHED_CAPACITY_SCALE;
 		rq->balance_callback = &balance_push_callback;
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -131,7 +131,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 			if (!dl_task_fits_capacity(p, cpu)) {
 				cpumask_clear_cpu(cpu, later_mask);
 
-				cap = arch_scale_cpu_capacity(cpu);
+				cap = capacity_orig_of(cpu);
 
 				if (cap > max_cap ||
 				    (cpu == task_cpu(p) && cap == max_cap)) {
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -132,7 +132,7 @@ static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
 	int i;
 
 	for_each_cpu_and(i, mask, cpu_active_mask)
-		cap += arch_scale_cpu_capacity(i);
+		cap += capacity_orig_of(i);
 
 	return cap;
 }
@@ -144,7 +144,7 @@ static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
 static inline unsigned long dl_bw_capacity(int i)
 {
 	if (!sched_asym_cpucap_active() &&
-	    arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
+	    capacity_orig_of(i) == SCHED_CAPACITY_SCALE) {
 		return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
 	} else {
 		RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
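
As an aside on the fast path restored above (illustration only, not part of
the patch): on a symmetric system every CPU's capacity_orig equals
SCHED_CAPACITY_SCALE, so dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT yields the same
value the per-CPU summation in __dl_bw_capacity() would produce. A standalone
check of that arithmetic:

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT	10
	#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)

	int main(void)
	{
		unsigned long nr_cpus = 8;
		unsigned long fast = nr_cpus << SCHED_CAPACITY_SHIFT;	/* 8192 */
		unsigned long slow = 0;

		for (unsigned long i = 0; i < nr_cpus; i++)
			slow += SCHED_CAPACITY_SCALE;	/* every CPU at full capacity */

		printf("fast=%lu slow=%lu\n", fast, slow);	/* both print 8192 */
		return 0;
	}
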
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4991,14 +4991,14 @@ static inline int util_fits_cpu(unsigned long util,
 		return fits;
 
 	/*
-	 * We must use arch_scale_cpu_capacity() for comparing against uclamp_min and
+	 * We must use capacity_orig_of() for comparing against uclamp_min and
 	 * uclamp_max. We only care about capacity pressure (by using
 	 * capacity_of()) for comparing against the real util.
 	 *
 	 * If a task is boosted to 1024 for example, we don't want a tiny
 	 * pressure to skew the check whether it fits a CPU or not.
 	 *
-	 * Similarly if a task is capped to arch_scale_cpu_capacity(little_cpu), it
+	 * Similarly if a task is capped to capacity_orig_of(little_cpu), it
 	 * should fit a little cpu even if there's some pressure.
 	 *
 	 * Only exception is for thermal pressure since it has a direct impact
@@ -5010,7 +5010,7 @@ static inline int util_fits_cpu(unsigned long util,
 	 * For uclamp_max, we can tolerate a drop in performance level as the
 	 * goal is to cap the task. So it's okay if it's getting less.
 	 */
-	capacity_orig = arch_scale_cpu_capacity(cpu);
+	capacity_orig = capacity_orig_of(cpu);
 	capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
 
 	/*
@@ -7536,7 +7536,7 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
 		 * Look for the CPU with best capacity.
 		 */
 		else if (fits < 0)
-			cpu_cap = arch_scale_cpu_capacity(cpu) - thermal_load_avg(cpu_rq(cpu));
+			cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu));
 
 		/*
 		 * First, select CPU which fits better (-1 being better than 0).
@@ -7778,7 +7778,7 @@ cpu_util(int cpu, struct task_struct *p, int dst_cpu, int boost)
 		util = max(util, util_est);
 	}
 
-	return min(util, arch_scale_cpu_capacity(cpu));
+	return min(util, capacity_orig_of(cpu));
 }
 
 unsigned long cpu_util_cfs(int cpu)
@@ -9626,6 +9626,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
 	unsigned long capacity = scale_rt_capacity(cpu);
 	struct sched_group *sdg = sd->groups;
 
+	cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
+
 	if (!capacity)
 		capacity = 1;
 
@@ -9702,7 +9704,7 @@ static inline int
 check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 {
 	return ((rq->cpu_capacity * sd->imbalance_pct) <
-		(arch_scale_cpu_capacity(cpu_of(rq)) * 100));
+		(rq->cpu_capacity_orig * 100));
 }
 
 /*
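
For readers of the restored check_cpu_capacity() expression, a standalone
worked example (not kernel code; the capacity numbers and the imbalance_pct
value of 117, a common sched_domain default, are just sample inputs):

	#include <stdio.h>

	int main(void)
	{
		unsigned long cpu_capacity_orig = 1024;	/* max attainable capacity */
		unsigned long cpu_capacity = 860;	/* after IRQ/RT time is removed */
		unsigned int imbalance_pct = 117;	/* example sched_domain setting */

		/* same shape as the reverted check_cpu_capacity() return value */
		int reduced = (cpu_capacity * imbalance_pct) < (cpu_capacity_orig * 100);

		/* 860 * 117 = 100620 < 102400 = 1024 * 100, so this prints reduced=1 */
		printf("reduced=%d\n", reduced);
		return 0;
	}
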
@@ -9713,7 +9715,7 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
 static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd)
 {
 	return rq->misfit_task_load &&
-	       (arch_scale_cpu_capacity(rq->cpu) < rq->rd->max_cpu_capacity ||
+	       (rq->cpu_capacity_orig < rq->rd->max_cpu_capacity ||
 		check_cpu_capacity(rq, sd));
 }
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -521,7 +521,7 @@ static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
 	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
 	max_cap = uclamp_eff_value(p, UCLAMP_MAX);
 
-	cpu_cap = arch_scale_cpu_capacity(cpu);
+	cpu_cap = capacity_orig_of(cpu);
 
 	return cpu_cap >= min(min_cap, max_cap);
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1076,6 +1076,7 @@ struct rq {
 	struct sched_domain __rcu	*sd;
 
 	unsigned long		cpu_capacity;
+	unsigned long		cpu_capacity_orig;
 
 	struct balance_callback *balance_callback;
 
@@ -3029,6 +3030,11 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
 #endif
 
 #ifdef CONFIG_SMP
+static inline unsigned long capacity_orig_of(int cpu)
+{
+	return cpu_rq(cpu)->cpu_capacity_orig;
+}
+
 /**
  * enum cpu_util_type - CPU utilization type
  * @FREQUENCY_UTIL:	Utilization used to select frequency
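
To see how the pieces of this revert fit together, here is a standalone model
of the pattern being restored (illustration only; the *_model names and the
capacity values are made up): update_cpu_capacity() samples
arch_scale_cpu_capacity() into rq->cpu_capacity_orig, and the fair/rt/deadline
call sites changed in this patch read that cached copy back through
capacity_orig_of() rather than calling the architecture hook directly.

	#include <stdio.h>

	struct rq_model {
		unsigned long cpu_capacity_orig;	/* stands in for rq->cpu_capacity_orig */
	};

	static unsigned long arch_capacity_model(int cpu)
	{
		return cpu == 0 ? 446 : 1024;		/* e.g. a little CPU and a big CPU */
	}

	/* stands in for the line re-added to update_cpu_capacity() above */
	static void update_capacity_model(struct rq_model *rq, int cpu)
	{
		rq->cpu_capacity_orig = arch_capacity_model(cpu);
	}

	/* stands in for capacity_orig_of(): readers use the cached value */
	static unsigned long capacity_orig_model(struct rq_model *rq)
	{
		return rq->cpu_capacity_orig;
	}

	int main(void)
	{
		struct rq_model rq[2] = { { 0 }, { 0 } };

		for (int cpu = 0; cpu < 2; cpu++)
			update_capacity_model(&rq[cpu], cpu);

		printf("cpu0=%lu cpu1=%lu\n",
		       capacity_orig_model(&rq[0]), capacity_orig_model(&rq[1]));
		return 0;
	}
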
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -2478,15 +2478,12 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 	/* Attach the domains */
 	rcu_read_lock();
 	for_each_cpu(i, cpu_map) {
-		unsigned long capacity;
-
 		rq = cpu_rq(i);
 		sd = *per_cpu_ptr(d.sd, i);
 
-		capacity = arch_scale_cpu_capacity(i);
 		/* Use READ_ONCE()/WRITE_ONCE() to avoid load/store tearing: */
-		if (capacity > READ_ONCE(d.rd->max_cpu_capacity))
-			WRITE_ONCE(d.rd->max_cpu_capacity, capacity);
+		if (rq->cpu_capacity_orig > READ_ONCE(d.rd->max_cpu_capacity))
+			WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig);
 
 		cpu_attach_domain(sd, d.rd, i);
 	}