/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/cpu_pm.h>
#include <linux/of.h>
#include <linux/ems.h>
#include <trace/events/power.h>

#include "sched.h"
#include "tune.h"
#include "ems/ems.h"

#ifdef CONFIG_SCHED_KAIR_GLUE
#include <linux/kair.h>
/**
 * The 2nd argument of kair_obj_creator() is decided experimentally by the
 * KAIR client itself; it represents how much variance the random variable
 * registered to the KAIR instance can exhibit at most, in terms of
 * referencing the d2u_decl_cmtpdf table (maximum index of the
 * d2u_decl_cmtpdf table).
 **/
#define UTILAVG_KAIR_VARIANCE 16
DECLARE_KAIRISTICS(cpufreq, 32, 25, 24, 25);
#endif

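/*
 * Aggregate CPU utilization with the schedtune boost applied; @other_util
 * (here the RT class contribution) is summed in before the boost margin is
 * applied, so RT task utilization is included in the boosted value when such
 * tasks are runnable.
 */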
unsigned long boosted_cpu_util(int cpu, unsigned long other_util);

/* Stub out fast switch routines present on mainline to reduce the backport
 * overhead. */
#define cpufreq_driver_fast_switch(x, y) 0
#define cpufreq_enable_fast_switch(x)
#define cpufreq_disable_fast_switch(x)
#define LATENCY_MULTIPLIER (1000)
#define SUGOV_KTHREAD_PRIORITY 50

struct sugov_tunables {
        struct gov_attr_set attr_set;
        unsigned int up_rate_limit_us;
        unsigned int down_rate_limit_us;
#ifdef CONFIG_SCHED_KAIR_GLUE
        bool fb_legacy;
#endif
        bool iowait_boost_enable;
        bool exp_util;
};

struct sugov_policy {
        struct cpufreq_policy *policy;

        struct sugov_tunables *tunables;
        struct list_head tunables_hook;

        raw_spinlock_t update_lock;  /* For shared policies */
        u64 last_freq_update_time;
        s64 min_rate_limit_ns;
        s64 up_rate_delay_ns;
        s64 down_rate_delay_ns;
        unsigned int next_freq;
        unsigned int cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used. */
        struct irq_work irq_work;
        struct kthread_work work;
        struct mutex work_lock;
        struct kthread_worker worker;
        struct task_struct *thread;
        bool work_in_progress;

        bool need_freq_update;
        bool limits_changed;
#ifdef CONFIG_SCHED_KAIR_GLUE
        bool be_stochastic;
#endif
};

struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy *sg_policy;

        bool iowait_boost_pending;
        unsigned int iowait_boost;
        unsigned int iowait_boost_max;
        u64 last_update;

#ifdef CONFIG_SCHED_KAIR_GLUE
        /**
         * KAIR instance, which should be referenced in a percpu manner,
         * together with the data needed to handle the target job intensity.
         **/
        struct kair_class *util_vessel;
        unsigned long cached_util;
#endif
        /* The fields below are only needed when sharing a policy. */
        unsigned long util;
        unsigned long max;
        unsigned int flags;

        /* The field below is for single-CPU policies only. */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
static DEFINE_PER_CPU(struct sugov_tunables *, cached_tunables);

/******************* exynos specific function *******************/
#define DEFAULT_EXPIRED_TIME 70
struct sugov_exynos {
        /* for slack timer */
        unsigned long min;
        int enabled;
        bool started;
        int expired_time;
        struct timer_list timer;

        /* pm_qos_class */
        int qos_min_class;
};
static DEFINE_PER_CPU(struct sugov_exynos, sugov_exynos);
static void sugov_stop_slack(int cpu);
static void sugov_start_slack(int cpu);
static void sugov_update_min(struct cpufreq_policy *policy);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;

        if (unlikely(sg_policy->limits_changed)) {
                sg_policy->limits_changed = false;
                sg_policy->need_freq_update = true;
                return true;
        }

        /* No need to recalculate next freq for min_rate_limit_us
         * at least. However we might still decide to further rate
         * limit once frequency change direction is decided, according
         * to the separate rate limits.
         */

        delta_ns = time - sg_policy->last_freq_update_time;
        return delta_ns >= sg_policy->min_rate_limit_ns;
}

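/*
 * Separate up/down transition rate limits: requests to raise the frequency
 * are rate limited by up_rate_delay_ns and requests to lower it by
 * down_rate_delay_ns, so a platform can ramp up quickly after short busy
 * periods while avoiding an immediate drop right after them.
 */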
static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
                                     unsigned int next_freq)
{
        s64 delta_ns;

        delta_ns = time - sg_policy->last_freq_update_time;

        if (next_freq > sg_policy->next_freq &&
            delta_ns < sg_policy->up_rate_delay_ns)
                return true;

        if (next_freq < sg_policy->next_freq &&
            delta_ns < sg_policy->down_rate_delay_ns)
                return true;

        return false;
}

static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
                                unsigned int next_freq)
{
        struct cpufreq_policy *policy = sg_policy->policy;

        if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
                /* Reset cached freq as next_freq isn't changed */
                sg_policy->cached_raw_freq = 0;
                return;
        }

        if (policy->cur == next_freq)
                return;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        if (policy->fast_switch_enabled) {
                next_freq = cpufreq_driver_fast_switch(policy, next_freq);
                if (!next_freq)
                        return;

                policy->cur = next_freq;
                trace_cpu_frequency(next_freq, smp_processor_id());
        } else if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
}

#ifdef CONFIG_FREQVAR_TUNE
unsigned long freqvar_boost_vector(int cpu, unsigned long util);
#else
static inline unsigned long freqvar_boost_vector(int cpu, unsigned long util)
{
        return util;
}
#endif

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
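 *
 * For example, with max_freq = 2 GHz and util / max = 0.5, the raw value is
 * next_freq = 1.25 * 2 GHz * 0.5 = 1.25 GHz, which is then resolved against
 * the driver's frequency table as described below.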
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->max : policy->cur;
#ifdef CONFIG_SCHED_KAIR_GLUE
        struct sugov_cpu *sg_cpu;
        struct kair_class *vessel;
        unsigned int delta_max, delta_min;
        int util_delta;
        unsigned int legacy_freq;

#ifdef KAIR_CLUSTER_TRAVERSING
        unsigned int each;
        unsigned int sigma_cpu = policy->cpu;
        randomness most_rand = 0;
#endif
        int cur_rand = KAIR_DIVERGING;
        RV_DECLARE(rv);
#endif

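        /*
         * Scale the base frequency by utilization. With exp_util set,
         * int_sqrt(util * 100 / max) / 10 approximates sqrt(util / max),
         * so the mapping is square-root shaped and requests comparatively
         * higher frequencies at low utilization than the default linear
         * 1.25 * freq * util / max formula.
         */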
        if (sg_policy->tunables->exp_util)
                freq = (freq + (freq >> 2)) * int_sqrt(util * 100 / max) / 10;
        else
                freq = (freq + (freq >> 2)) * util / max;

#ifdef CONFIG_SCHED_KAIR_GLUE
        legacy_freq = freq;

        if (sg_policy->tunables->fb_legacy)
                goto skip_betting;

#ifndef KAIR_CLUSTER_TRAVERSING
        sg_cpu = &per_cpu(sugov_cpu, policy->cpu);
        vessel = sg_cpu->util_vessel;

        if (!vessel)
                goto skip_betting;

        cur_rand = vessel->job_inferer(vessel);
        if (cur_rand == KAIR_DIVERGING)
                goto skip_betting;
#else
        for_each_cpu(each, policy->cpus) {
                sg_cpu = &per_cpu(sugov_cpu, each);

                vessel = sg_cpu->util_vessel;
                if (vessel) {
                        cur_rand = vessel->job_inferer(vessel);
                        if (cur_rand == KAIR_DIVERGING)
                                goto skip_betting;
                        else {
                                if (cur_rand > (int)most_rand) {
                                        most_rand = (randomness)cur_rand;
                                        sigma_cpu = each;
                                }
                        }
                } else
                        goto skip_betting;
        }

        sg_cpu = &per_cpu(sugov_cpu, sigma_cpu);
        vessel = sg_cpu->util_vessel;
#endif
        util_delta = sg_cpu->util - sg_cpu->cached_util;
        delta_max = sg_cpu->max - sg_cpu->cached_util;
        delta_min = sg_cpu->cached_util;

        RV_SET(rv, util_delta, delta_max, delta_min);
        freq = vessel->cap_bettor(vessel, &rv, freq);

skip_betting:
        trace_sugov_kair_freq(policy->cpu, util, max, cur_rand, legacy_freq, freq);
#endif

        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                return sg_policy->next_freq;

        sg_policy->need_freq_update = false;
        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
}

static inline bool use_pelt(void)
{
#ifdef CONFIG_SCHED_WALT
        return (!sysctl_sched_use_walt_cpu_util || walt_disabled);
#else
        return true;
#endif
}

unsigned int sched_rt_remove_ratio_for_freq = 0;

static void sugov_get_util(unsigned long *util, unsigned long *max, u64 time)
{
        int cpu = smp_processor_id();
        struct rq *rq = cpu_rq(cpu);
        unsigned long max_cap, rt;
        s64 delta;

        max_cap = arch_scale_cpu_capacity(NULL, cpu);

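        /*
         * Derive the RT class contribution: rq->rt_avg is normalized by the
         * scheduler averaging period and scaled into the same capacity units
         * as the CFS utilization so it can be combined with it below.
         */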
        sched_avg_update(rq);
        delta = time - rq->age_stamp;
        if (unlikely(delta < 0))
                delta = 0;
        rt = div64_u64(rq->rt_avg, sched_avg_period() + delta);
        rt = (rt * max_cap) >> SCHED_CAPACITY_SHIFT;

#ifdef CONFIG_SCHED_EMS
        *util = ml_boosted_cpu_util(cpu) + rt;
#else
        *util = boosted_cpu_util(cpu, rt);
#endif
        if (likely(use_pelt()))
                *util = *util + rt;

        *util = freqvar_boost_vector(cpu, *util);
        *util = min(*util, max_cap);
        *max = max_cap;

#ifdef CONFIG_SCHED_EMS
        part_cpu_active_ratio(util, max, cpu);
#endif
}

#ifdef CONFIG_SCHED_KAIR_GLUE
static inline void sugov_util_collapse(struct sugov_cpu *sg_cpu)
{
        struct kair_class *vessel = sg_cpu->util_vessel;
        int util_delta = min(sg_cpu->max, sg_cpu->util) - sg_cpu->cached_util;
        unsigned int delta_max = sg_cpu->max - sg_cpu->cached_util;
        unsigned int delta_min = sg_cpu->cached_util;

        RV_DECLARE(job);

        if (vessel) {
                RV_SET(job, util_delta, delta_max, delta_min);
                vessel->job_learner(vessel, &job);
        }
}
#endif

static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                                   unsigned int flags)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;

        if (!sg_policy->tunables->iowait_boost_enable)
                return;

        if (sg_cpu->iowait_boost) {
                s64 delta_ns = time - sg_cpu->last_update;

                /* Clear iowait_boost if the CPU appears to have been idle. */
                if (delta_ns > TICK_NSEC) {
                        sg_cpu->iowait_boost = 0;
                        sg_cpu->iowait_boost_pending = false;
                }
        }

        if (flags & SCHED_CPUFREQ_IOWAIT) {
                if (sg_cpu->iowait_boost_pending)
                        return;

                sg_cpu->iowait_boost_pending = true;

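                /*
                 * Double the boost for each back-to-back IOWAIT wakeup,
                 * starting from policy->min and capping at iowait_boost_max,
                 * so sustained I/O-bound activity ramps the boost up quickly.
                 */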
                if (sg_cpu->iowait_boost) {
                        sg_cpu->iowait_boost <<= 1;
                        if (sg_cpu->iowait_boost > sg_cpu->iowait_boost_max)
                                sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
                } else {
                        sg_cpu->iowait_boost = sg_cpu->sg_policy->policy->min;
                }
        }
}

static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
                               unsigned long *max)
{
        unsigned int boost_util, boost_max;

        if (!sg_cpu->iowait_boost)
                return;

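        /*
         * Consume the boost: a freshly signalled boost is applied at full
         * value once, then halved on every subsequent update until it falls
         * below policy->min and is cleared.
         */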
        if (sg_cpu->iowait_boost_pending) {
                sg_cpu->iowait_boost_pending = false;
        } else {
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < sg_cpu->sg_policy->policy->min) {
                        sg_cpu->iowait_boost = 0;
                        return;
                }
        }

        boost_util = sg_cpu->iowait_boost;
        boost_max = sg_cpu->iowait_boost_max;

        if (*util * boost_max < *max * boost_util) {
                *util = boost_util;
                *max = boost_max;
        }
}

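/*
 * A CPU is considered busy if its nohz idle-calls counter has not advanced
 * since the last governor evaluation, i.e. it has not entered idle in the
 * meantime. Frequency reductions are skipped for busy CPUs (see
 * sugov_update_single()) to avoid premature ramp-downs when utilization
 * momentarily dips, e.g. on task migration.
 */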
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls();
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util, max;
        unsigned int next_f;
        bool busy;

        sugov_set_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        if (!sugov_should_update_freq(sg_policy, time))
                return;

        /* Limits may have changed, don't skip frequency update */
        busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu);

        if (flags & SCHED_CPUFREQ_DL) {
                next_f = policy->cpuinfo.max_freq;
        } else {
                sugov_get_util(&util, &max, time);
                sugov_iowait_boost(sg_cpu, &util, &max);
                next_f = get_next_freq(sg_policy, util, max);
UPSTREAM: cpufreq: schedutil: Avoid reducing frequency of busy CPUs prematurely
The way the schedutil governor uses the PELT metric causes it to
underestimate the CPU utilization in some cases.
That can be easily demonstrated by running kernel compilation on
a Sandy Bridge Intel processor, running turbostat in parallel with
it and looking at the values written to the MSR_IA32_PERF_CTL
register. Namely, the expected result would be that when all CPUs
were 100% busy, all of them would be requested to run in the maximum
P-state, but observation shows that this clearly isn't the case.
The CPUs run in the maximum P-state for a while and then are
requested to run slower and go back to the maximum P-state after
a while again. That causes the actual frequency of the processor to
visibly oscillate below the sustainable maximum in a jittery fashion
which clearly is not desirable.
That has been attributed to CPU utilization metric updates on task
migration that cause the total utilization value for the CPU to be
reduced by the utilization of the migrated task. If that happens,
the schedutil governor may see a CPU utilization reduction and will
attempt to reduce the CPU frequency accordingly right away. That
may be premature, though, for example if the system is generally
busy and there are other runnable tasks waiting to be run on that
CPU already.
This is unlikely to be an issue on systems where cpufreq policies are
shared between multiple CPUs, because in those cases the policy
utilization is computed as the maximum of the CPU utilization values
over the whole policy and if that turns out to be low, reducing the
frequency for the policy most likely is a good idea anyway. On
systems with one CPU per policy, however, it may affect performance
adversely and even lead to increased energy consumption in some cases.
On those systems it may be addressed by taking another utilization
metric into consideration, like whether or not the CPU whose
frequency is about to be reduced has been idle recently, because if
that's not the case, the CPU is likely to be busy in the near future
and its frequency should not be reduced.
To that end, use the counter of idle calls in the timekeeping code.
Namely, make the schedutil governor look at that counter for the
current CPU every time before its frequency is about to be reduced.
If the counter has not changed since the previous iteration of the
governor computations for that CPU, the CPU has been busy for all
that time and its frequency should not be decreased, so if the new
frequency would be lower than the one set previously, the governor
will skip the frequency update.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Joel Fernandes <joelaf@google.com>
(cherry picked from commit b7eaf1aab9f8bd2e49fceed77ebc66c1b5800718)
(simple CPUFREQ_RT_DL vs CPUFREQ_DL usage conflicts)
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
Change-Id: I531ec02c052944ee07a904dc2a25c59948ee762b
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
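A minimal sketch of the busy check this change describes, assuming a saved_idle_calls field in struct sugov_cpu; the helper actually used earlier in this file may differ in detail:
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	/* Snapshot the idle-calls counter maintained by the timekeeping code. */
	unsigned long idle_calls = tick_nohz_get_idle_calls();
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	/* No new idle calls since the last governor run: the CPU stayed busy. */
	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */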
2017-05-25 15:24:58 +01:00
|
|
|
/*
|
|
|
|
* Do not reduce the frequency if the CPU has not been idle
|
|
|
|
* recently, as the reduction is likely to be premature then.
|
|
|
|
*/
|
2018-05-09 11:44:56 +02:00
|
|
|
if (busy && next_f < sg_policy->next_freq &&
|
|
|
|
sg_policy->next_freq != UINT_MAX) {
|
2017-05-25 15:24:58 +01:00
|
|
|
next_f = sg_policy->next_freq;
|
2017-11-08 19:47:36 +05:30
|
|
|
|
|
|
|
/* Reset cached freq as next_freq has changed */
|
|
|
|
sg_policy->cached_raw_freq = 0;
|
|
|
|
}
|
2016-08-16 22:14:55 +02:00
|
|
|
}
|
2016-04-02 01:09:12 +02:00
|
|
|
sugov_update_commit(sg_policy, time, next_f);
|
|
|
|
}
|
|
|
|
|
2020-04-04 13:06:34 +07:00
|
|
|
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
|
2016-04-02 01:09:12 +02:00
|
|
|
{
|
2016-07-13 13:25:26 -07:00
|
|
|
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
|
2016-04-02 01:09:12 +02:00
|
|
|
struct cpufreq_policy *policy = sg_policy->policy;
|
2017-05-25 15:22:59 +01:00
|
|
|
unsigned long util = 0, max = 1;
|
2016-04-02 01:09:12 +02:00
|
|
|
unsigned int j;
|
|
|
|
|
2020-04-04 13:06:34 +07:00
|
|
|
for_each_cpu(j, policy->cpus) {
|
2017-05-25 15:22:59 +01:00
|
|
|
struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
|
2016-04-02 01:09:12 +02:00
|
|
|
unsigned long j_util, j_max;
|
|
|
|
s64 delta_ns;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the CPU utilization was last updated before the previous
|
|
|
|
* frequency update and the time elapsed between the last update
|
|
|
|
* of the CPU utilization and the last frequency update is long
|
|
|
|
* enough, don't take the CPU into account as it probably is
|
2016-09-10 00:00:31 +02:00
|
|
|
* idle now (and clear iowait_boost for it).
|
2016-04-02 01:09:12 +02:00
|
|
|
*/
|
2020-04-04 13:06:34 +07:00
|
|
|
delta_ns = time - j_sg_cpu->last_update;
|
2016-09-10 00:00:31 +02:00
|
|
|
if (delta_ns > TICK_NSEC) {
|
|
|
|
j_sg_cpu->iowait_boost = 0;
|
2017-08-28 09:56:27 -07:00
|
|
|
j_sg_cpu->iowait_boost_pending = false;
|
2016-04-02 01:09:12 +02:00
|
|
|
continue;
|
2016-09-10 00:00:31 +02:00
|
|
|
}
|
2016-08-25 15:59:17 -07:00
|
|
|
if (j_sg_cpu->flags & SCHED_CPUFREQ_DL)
|
2017-05-25 15:22:59 +01:00
|
|
|
return policy->cpuinfo.max_freq;
|
2016-04-02 01:09:12 +02:00
|
|
|
|
2016-08-16 22:14:55 +02:00
|
|
|
j_util = j_sg_cpu->util;
|
2016-04-02 01:09:12 +02:00
|
|
|
j_max = j_sg_cpu->max;
|
2017-10-12 15:56:12 -07:00
|
|
|
if (j_util * max >= j_max * util) {
|
2016-04-02 01:09:12 +02:00
|
|
|
util = j_util;
|
|
|
|
max = j_max;
|
|
|
|
}
|
2016-09-10 00:00:31 +02:00
|
|
|
|
|
|
|
sugov_iowait_boost(j_sg_cpu, &util, &max);
|
2016-04-02 01:09:12 +02:00
|
|
|
}
|
|
|
|
|
2017-03-02 14:03:21 +05:30
|
|
|
return get_next_freq(sg_policy, util, max);
|
2016-04-02 01:09:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static void sugov_update_shared(struct update_util_data *hook, u64 time,
|
2016-08-16 22:14:55 +02:00
|
|
|
unsigned int flags)
|
2016-04-02 01:09:12 +02:00
|
|
|
{
|
|
|
|
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
|
|
|
|
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
|
2016-08-16 22:14:55 +02:00
|
|
|
unsigned long util, max;
|
2016-04-02 01:09:12 +02:00
|
|
|
unsigned int next_f;
|
|
|
|
|
2016-08-25 15:59:17 -07:00
|
|
|
sugov_get_util(&util, &max, time);
|
2016-08-16 22:14:55 +02:00
|
|
|
|
2016-04-02 01:09:12 +02:00
|
|
|
raw_spin_lock(&sg_policy->update_lock);
|
|
|
|
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
sg_cpu->cached_util = min(max, sg_cpu->max ?
|
|
|
|
mult_frac(sg_cpu->util, max, sg_cpu->max) : sg_cpu->util);
|
|
|
|
#endif
|
2016-04-02 01:09:12 +02:00
|
|
|
sg_cpu->util = util;
|
|
|
|
sg_cpu->max = max;
|
2016-08-16 22:14:55 +02:00
|
|
|
sg_cpu->flags = flags;
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
sugov_util_collapse(sg_cpu);
|
|
|
|
#endif
|
2016-09-10 00:00:31 +02:00
|
|
|
sugov_set_iowait_boost(sg_cpu, time, flags);
|
2016-04-02 01:09:12 +02:00
|
|
|
sg_cpu->last_update = time;
|
|
|
|
|
|
|
|
if (sugov_should_update_freq(sg_policy, time)) {
|
2017-05-25 15:22:59 +01:00
|
|
|
if (flags & SCHED_CPUFREQ_DL)
|
|
|
|
next_f = sg_policy->policy->cpuinfo.max_freq;
|
|
|
|
else
|
2020-04-04 13:06:34 +07:00
|
|
|
next_f = sugov_next_freq_shared(sg_cpu, time);
|
2017-05-25 15:22:59 +01:00
|
|
|
|
2016-04-02 01:09:12 +02:00
|
|
|
sugov_update_commit(sg_policy, time, next_f);
|
|
|
|
}
|
|
|
|
|
|
|
|
raw_spin_unlock(&sg_policy->update_lock);
|
|
|
|
}
|
|
|
|
|
2020-04-04 13:06:34 +07:00
|
|
|
static void sugov_work(struct kthread_work *work)
|
2016-04-02 01:09:12 +02:00
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
|
2018-05-22 15:55:53 -07:00
|
|
|
unsigned int freq;
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Hold sg_policy->update_lock briefly to handle the case where
|
|
|
|
* sg_policy->next_freq is read here and then updated by
|
|
|
|
* sugov_update_shared() just before work_in_progress is set to false
|
|
|
|
* here; without the lock we could miss queueing the new update.
|
|
|
|
*
|
|
|
|
* Note: If a work was queued after the update_lock is released,
|
|
|
|
* sugov_work() will just be called again by the kthread_work code, and the
|
|
|
|
* request will be processed before the sugov thread sleeps.
|
|
|
|
*/
|
|
|
|
raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
|
|
|
|
freq = sg_policy->next_freq;
|
|
|
|
sg_policy->work_in_progress = false;
|
|
|
|
raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
|
2016-04-02 01:09:12 +02:00
|
|
|
|
|
|
|
mutex_lock(&sg_policy->work_lock);
|
2018-05-22 15:55:53 -07:00
|
|
|
__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
|
2016-04-02 01:09:12 +02:00
|
|
|
mutex_unlock(&sg_policy->work_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sugov_irq_work(struct irq_work *irq_work)
|
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy;
|
|
|
|
|
|
|
|
sg_policy = container_of(irq_work, struct sugov_policy, irq_work);
|
2016-11-15 13:53:22 +05:30
|
|
|
|
2020-04-04 13:06:34 +07:00
|
|
|
/*
|
|
|
|
* For RT and deadline tasks, the schedutil governor shoots the
|
|
|
|
* frequency to maximum. Special care must be taken to ensure that this
|
|
|
|
* kthread doesn't result in the same behavior.
|
|
|
|
*
|
|
|
|
* This is (mostly) guaranteed by the work_in_progress flag. The flag is
|
|
|
|
* updated only at the end of the sugov_work() function and before that
|
|
|
|
* the schedutil governor rejects all other frequency scaling requests.
|
|
|
|
*
|
|
|
|
* There is a very rare case though, where the RT thread yields right
|
|
|
|
* after the work_in_progress flag is cleared. The effects of that are
|
|
|
|
* neglected for now.
|
|
|
|
*/
|
|
|
|
kthread_queue_work(&sg_policy->worker, &sg_policy->work);
|
2016-04-02 01:09:12 +02:00
|
|
|
}
|
|
|
|
|
2018-01-19 16:42:10 +09:00
|
|
|
/************************ Governor externals ***********************/
|
|
|
|
static void update_min_rate_limit_us(struct sugov_policy *sg_policy);
|
|
|
|
void sugov_update_rate_limit_us(struct cpufreq_policy *policy,
|
|
|
|
int up_rate_limit, int down_rate_limit)
|
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy;
|
|
|
|
struct sugov_tunables *tunables;
|
|
|
|
|
|
|
|
sg_policy = policy->governor_data;
|
|
|
|
if (!sg_policy)
|
|
|
|
return;
|
|
|
|
|
|
|
|
tunables = sg_policy->tunables;
|
|
|
|
if (!tunables)
|
|
|
|
return;
|
|
|
|
|
|
|
|
tunables->up_rate_limit_us = (unsigned int)up_rate_limit;
|
|
|
|
tunables->down_rate_limit_us = (unsigned int)down_rate_limit;
|
|
|
|
|
|
|
|
sg_policy->up_rate_delay_ns = up_rate_limit * NSEC_PER_USEC;
|
|
|
|
sg_policy->down_rate_delay_ns = down_rate_limit * NSEC_PER_USEC;
|
|
|
|
|
|
|
|
update_min_rate_limit_us(sg_policy);
|
|
|
|
}
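A hypothetical external caller (for example a platform-specific tunables hook) passes both limits in microseconds:
	/* Hypothetical example: allow ramp-up after 500us, ramp-down after 20ms. */
	sugov_update_rate_limit_us(policy, 500, 20000);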
|
|
|
|
|
|
|
|
int sugov_sysfs_add_attr(struct cpufreq_policy *policy, const struct attribute *attr)
|
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy;
|
|
|
|
struct sugov_tunables *tunables;
|
|
|
|
|
|
|
|
sg_policy = policy->governor_data;
|
|
|
|
if (!sg_policy)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
tunables = sg_policy->tunables;
|
|
|
|
if (!tunables)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
return sysfs_create_file(&tunables->attr_set.kobj, attr);
|
|
|
|
}
|
|
|
|
|
|
|
|
struct cpufreq_policy *sugov_get_attr_policy(struct gov_attr_set *attr_set)
|
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy = list_first_entry(&attr_set->policy_list,
|
|
|
|
typeof(*sg_policy), tunables_hook);
|
|
|
|
return sg_policy->policy;
|
|
|
|
}
|
|
|
|
|
2016-04-02 01:09:12 +02:00
|
|
|
/************************** sysfs interface ************************/
|
|
|
|
|
|
|
|
static struct sugov_tunables *global_tunables;
|
|
|
|
static DEFINE_MUTEX(global_tunables_lock);
|
|
|
|
|
|
|
|
static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
|
|
|
|
{
|
|
|
|
return container_of(attr_set, struct sugov_tunables, attr_set);
|
|
|
|
}
|
|
|
|
|
ANDROID: cpufreq: schedutil: add up/down frequency transition rate limits
The rate-limit tunable in the schedutil governor applies to transitions
to both lower and higher frequencies. On several platforms it is not the
ideal tunable though, as it is difficult to get best power/performance
figures using the same limit in both directions.
It is common on mobile platforms with demanding user interfaces to want
to increase frequency rapidly for example but decrease slowly.
One of the example can be a case where we have short busy periods
followed by similar or longer idle periods. If we keep the rate-limit
high enough, we will not go to higher frequencies soon enough. On the
other hand, if we keep it too low, we will have too many frequency
transitions, as we will always reduce the frequency after the busy
period.
It would be very useful if we can set low rate-limit while increasing
the frequency (so that we can respond to the short busy periods quickly)
and high rate-limit while decreasing frequency (so that we don't reduce
the frequency immediately after the short busy period and that may avoid
frequency transitions before the next busy period).
Implement separate up/down transition rate limits. Note that the
governor avoids frequency recalculations for a period equal to minimum
of up and down rate-limit. A global mutex is also defined to protect
updates to min_rate_limit_us via two separate sysfs files.
Note that this wouldn't change behavior of the schedutil governor for
the platforms which wish to keep same values for both up and down rate
limits.
This is tested with the rt-app [1] on ARM Exynos, dual A15 processor
platform.
Testcase: Run a SCHED_OTHER thread on CPU0 which will emulate work-load
for X ms of busy period out of the total period of Y ms, i.e. Y - X ms
of idle period. The values of X/Y taken were: 20/40, 20/50, 20/70, i.e
idle periods of 20, 30 and 50 ms respectively. These were tested against
values of up/down rate limits as: 10/10 ms and 10/40 ms.
For every test we noticed a performance increase of 5-10% with the
schedutil governor, which was very much expected.
[Viresh]: Simplified user interface and introduced min_rate_limit_us +
mutex, rewrote commit log and included test results.
[1] https://github.com/scheduler-tools/rt-app/
Change-Id: I18720a83855b196b8e21dcdc8deae79131635b84
Signed-off-by: Steve Muckle <smuckle.linux@gmail.com>
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
(applied from https://marc.info/?l=linux-kernel&m=147936011103832&w=2)
[trivial adaptations]
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
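The split shows up as two independent checks against the time of the last frequency update; a minimal sketch of that rate-limit test, using the up_rate_delay_ns and down_rate_delay_ns fields this file maintains (the helper actually used earlier in this file may differ in detail):
static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
				     unsigned int next_freq)
{
	s64 delta_ns = time - sg_policy->last_freq_update_time;

	/* Ramping up: apply the (typically short) up rate limit. */
	if (next_freq > sg_policy->next_freq &&
	    delta_ns < sg_policy->up_rate_delay_ns)
		return true;

	/* Ramping down: apply the (typically longer) down rate limit. */
	if (next_freq < sg_policy->next_freq &&
	    delta_ns < sg_policy->down_rate_delay_ns)
		return true;

	/* Not rate-limited: the frequency update may go ahead. */
	return false;
}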
2016-11-17 10:48:45 +05:30
|
|
|
static DEFINE_MUTEX(min_rate_lock);
|
|
|
|
|
|
|
|
static void update_min_rate_limit_us(struct sugov_policy *sg_policy)
|
|
|
|
{
|
|
|
|
mutex_lock(&min_rate_lock);
|
|
|
|
sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
|
|
|
|
sg_policy->down_rate_delay_ns);
|
|
|
|
mutex_unlock(&min_rate_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t up_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
|
|
|
|
return sprintf(buf, "%u\n", tunables->up_rate_limit_us);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
|
2016-04-02 01:09:12 +02:00
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
|
2016-11-17 10:48:45 +05:30
|
|
|
return sprintf(buf, "%u\n", tunables->down_rate_limit_us);
|
2016-04-02 01:09:12 +02:00
|
|
|
}
|
|
|
|
|
2016-11-17 10:48:45 +05:30
|
|
|
static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
|
|
|
|
const char *buf, size_t count)
|
2016-04-02 01:09:12 +02:00
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
struct sugov_policy *sg_policy;
|
|
|
|
unsigned int rate_limit_us;
|
|
|
|
|
|
|
|
if (kstrtouint(buf, 10, &rate_limit_us))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2016-11-17 10:48:45 +05:30
|
|
|
tunables->up_rate_limit_us = rate_limit_us;
|
2016-04-02 01:09:12 +02:00
|
|
|
|
2016-11-17 10:48:45 +05:30
|
|
|
list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
|
|
|
|
sg_policy->up_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
|
|
|
|
update_min_rate_limit_us(sg_policy);
|
|
|
|
}
|
2016-04-02 01:09:12 +02:00
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2016-11-17 10:48:45 +05:30
|
|
|
static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
struct sugov_policy *sg_policy;
|
|
|
|
unsigned int rate_limit_us;
|
|
|
|
|
|
|
|
if (kstrtouint(buf, 10, &rate_limit_us))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
tunables->down_rate_limit_us = rate_limit_us;
|
|
|
|
|
|
|
|
list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
|
|
|
|
sg_policy->down_rate_delay_ns = rate_limit_us * NSEC_PER_USEC;
|
|
|
|
update_min_rate_limit_us(sg_policy);
|
|
|
|
}
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2017-05-18 22:46:10 -07:00
|
|
|
static ssize_t iowait_boost_enable_show(struct gov_attr_set *attr_set,
|
|
|
|
char *buf)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
|
|
|
|
return sprintf(buf, "%u\n", tunables->iowait_boost_enable);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t iowait_boost_enable_store(struct gov_attr_set *attr_set,
|
|
|
|
const char *buf, size_t count)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
bool enable;
|
|
|
|
|
|
|
|
if (kstrtobool(buf, &enable))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
tunables->iowait_boost_enable = enable;
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2020-03-02 20:34:04 -08:00
|
|
|
static ssize_t exp_util_show(struct gov_attr_set *attr_set, char *buf)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
|
|
|
|
return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->exp_util);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t exp_util_store(struct gov_attr_set *attr_set, const char *buf,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
|
|
|
|
if (kstrtobool(buf, &tunables->exp_util))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
static ssize_t fb_legacy_show(struct gov_attr_set *attr_set, char *buf)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
|
|
|
|
return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->fb_legacy);
|
|
|
|
}
|
|
|
|
|
|
|
|
static ssize_t fb_legacy_store(struct gov_attr_set *attr_set, const char *buf,
|
|
|
|
size_t count)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
|
|
|
|
|
|
|
|
if (kstrtobool(buf, &tunables->fb_legacy))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2016-11-17 10:48:45 +05:30
|
|
|
static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
|
|
|
|
static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
static struct governor_attr fb_legacy = __ATTR_RW(fb_legacy);
|
|
|
|
#endif
|
2017-05-18 22:46:10 -07:00
|
|
|
static struct governor_attr iowait_boost_enable = __ATTR_RW(iowait_boost_enable);
|
2020-03-02 20:34:04 -08:00
|
|
|
static struct governor_attr exp_util = __ATTR_RW(exp_util);
|
2016-04-02 01:09:12 +02:00
|
|
|
|
|
|
|
static struct attribute *sugov_attributes[] = {
|
2016-11-17 10:48:45 +05:30
|
|
|
&up_rate_limit_us.attr,
|
|
|
|
&down_rate_limit_us.attr,
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
&fb_legacy.attr,
|
|
|
|
#endif
|
2017-05-18 22:46:10 -07:00
|
|
|
&iowait_boost_enable.attr,
|
2020-03-02 20:34:04 -08:00
|
|
|
&exp_util.attr,
|
2016-04-02 01:09:12 +02:00
|
|
|
NULL
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct kobj_type sugov_tunables_ktype = {
|
|
|
|
.default_attrs = sugov_attributes,
|
|
|
|
.sysfs_ops = &governor_sysfs_ops,
|
|
|
|
};
|
|
|
|
|
|
|
|
/********************** cpufreq governor interface *********************/
|
|
|
|
|
|
|
|
static struct cpufreq_governor schedutil_gov;
|
|
|
|
|
|
|
|
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
|
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy;
|
|
|
|
|
|
|
|
sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
|
|
|
|
if (!sg_policy)
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
sg_policy->policy = policy;
|
|
|
|
raw_spin_lock_init(&sg_policy->update_lock);
|
|
|
|
return sg_policy;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sugov_policy_free(struct sugov_policy *sg_policy)
|
|
|
|
{
|
2020-02-04 13:44:48 +02:00
|
|
|
kfree(sg_policy);
|
2016-11-15 13:53:22 +05:30
|
|
|
}
|
|
|
|
|
2020-04-04 13:06:34 +07:00
|
|
|
static int sugov_kthread_create(struct sugov_policy *sg_policy)
|
|
|
|
{
|
|
|
|
struct task_struct *thread;
|
|
|
|
struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
|
|
|
|
struct cpufreq_policy *policy = sg_policy->policy;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* kthread only required for slow path */
|
|
|
|
if (policy->fast_switch_enabled)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
kthread_init_work(&sg_policy->work, sugov_work);
|
|
|
|
kthread_init_worker(&sg_policy->worker);
|
|
|
|
thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
|
|
|
|
"sugov:%d",
|
|
|
|
cpumask_first(policy->related_cpus));
|
|
|
|
if (IS_ERR(thread)) {
|
|
|
|
pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
|
|
|
|
return PTR_ERR(thread);
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
|
|
|
|
if (ret) {
|
|
|
|
kthread_stop(thread);
|
|
|
|
pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
sg_policy->thread = thread;
|
|
|
|
kthread_bind_mask(thread, policy->related_cpus);
|
|
|
|
init_irq_work(&sg_policy->irq_work, sugov_irq_work);
|
|
|
|
mutex_init(&sg_policy->work_lock);
|
|
|
|
|
|
|
|
wake_up_process(thread);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sugov_kthread_stop(struct sugov_policy *sg_policy)
|
|
|
|
{
|
|
|
|
/* kthread only required for slow path */
|
|
|
|
if (sg_policy->policy->fast_switch_enabled)
|
|
|
|
return;
|
|
|
|
|
|
|
|
kthread_flush_worker(&sg_policy->worker);
|
|
|
|
kthread_stop(sg_policy->thread);
|
|
|
|
mutex_destroy(&sg_policy->work_lock);
|
|
|
|
}
|
|
|
|
|
2016-04-02 01:09:12 +02:00
|
|
|
static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
|
|
|
|
{
|
|
|
|
struct sugov_tunables *tunables;
|
|
|
|
|
|
|
|
tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
|
|
|
|
if (tunables) {
|
|
|
|
gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
|
|
|
|
if (!have_governor_per_policy())
|
|
|
|
global_tunables = tunables;
|
|
|
|
}
|
|
|
|
return tunables;
|
|
|
|
}
|
|
|
|
|
2018-05-29 17:23:04 +02:00
|
|
|
static void sugov_tunables_save(struct cpufreq_policy *policy,
|
|
|
|
struct sugov_tunables *tunables)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);
|
|
|
|
|
|
|
|
if (!have_governor_per_policy())
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (!cached) {
|
|
|
|
cached = kzalloc(sizeof(*tunables), GFP_KERNEL);
|
|
|
|
if (!cached) {
|
|
|
|
pr_warn("Couldn't allocate tunables for caching\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
for_each_cpu(cpu, policy->related_cpus)
|
|
|
|
per_cpu(cached_tunables, cpu) = cached;
|
|
|
|
}
|
|
|
|
|
2020-03-02 20:34:04 -08:00
|
|
|
cached->exp_util = tunables->exp_util;
|
2018-05-29 17:23:04 +02:00
|
|
|
cached->up_rate_limit_us = tunables->up_rate_limit_us;
|
|
|
|
cached->down_rate_limit_us = tunables->down_rate_limit_us;
|
|
|
|
}
|
|
|
|
|
2016-04-02 01:09:12 +02:00
|
|
|
static void sugov_tunables_free(struct sugov_tunables *tunables)
|
|
|
|
{
|
|
|
|
if (!have_governor_per_policy())
|
|
|
|
global_tunables = NULL;
|
|
|
|
|
|
|
|
kfree(tunables);
|
|
|
|
}
|
|
|
|
|
2018-05-29 17:23:04 +02:00
|
|
|
static void sugov_tunables_restore(struct cpufreq_policy *policy)
|
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy = policy->governor_data;
|
|
|
|
struct sugov_tunables *tunables = sg_policy->tunables;
|
|
|
|
struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);
|
|
|
|
|
|
|
|
if (!cached)
|
|
|
|
return;
|
|
|
|
|
2020-03-02 20:34:04 -08:00
|
|
|
tunables->exp_util = cached->exp_util;
|
2018-05-29 17:23:04 +02:00
|
|
|
tunables->up_rate_limit_us = cached->up_rate_limit_us;
|
|
|
|
tunables->down_rate_limit_us = cached->down_rate_limit_us;
|
|
|
|
sg_policy->up_rate_delay_ns =
|
|
|
|
tunables->up_rate_limit_us * NSEC_PER_USEC;
|
|
|
|
sg_policy->down_rate_delay_ns =
|
|
|
|
tunables->down_rate_limit_us * NSEC_PER_USEC;
|
|
|
|
sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
|
|
|
|
sg_policy->down_rate_delay_ns);
|
|
|
|
}
|
|
|
|
|
2016-04-02 01:09:12 +02:00
|
|
|
static int sugov_init(struct cpufreq_policy *policy)
|
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy;
|
|
|
|
struct sugov_tunables *tunables;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/* State should be equivalent to EXIT */
|
|
|
|
if (policy->governor_data)
|
|
|
|
return -EBUSY;
|
|
|
|
|
2017-07-20 16:32:35 +01:00
|
|
|
cpufreq_enable_fast_switch(policy);
|
|
|
|
|
2016-04-02 01:09:12 +02:00
|
|
|
sg_policy = sugov_policy_alloc(policy);
|
2017-07-20 16:32:35 +01:00
|
|
|
if (!sg_policy) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto disable_fast_switch;
|
|
|
|
}
|
2016-04-02 01:09:12 +02:00
|
|
|
|
2020-04-04 13:06:34 +07:00
|
|
|
ret = sugov_kthread_create(sg_policy);
|
|
|
|
if (ret)
|
|
|
|
goto free_sg_policy;
|
|
|
|
|
2016-04-02 01:09:12 +02:00
|
|
|
mutex_lock(&global_tunables_lock);
|
|
|
|
|
|
|
|
if (global_tunables) {
|
|
|
|
if (WARN_ON(have_governor_per_policy())) {
|
|
|
|
ret = -EINVAL;
|
2020-04-04 13:06:34 +07:00
|
|
|
goto stop_kthread;
|
2016-04-02 01:09:12 +02:00
|
|
|
}
|
|
|
|
policy->governor_data = sg_policy;
|
|
|
|
sg_policy->tunables = global_tunables;
|
|
|
|
|
|
|
|
gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
tunables = sugov_tunables_alloc(sg_policy);
|
|
|
|
if (!tunables) {
|
|
|
|
ret = -ENOMEM;
|
2020-04-04 13:06:34 +07:00
|
|
|
goto stop_kthread;
|
2016-04-02 01:09:12 +02:00
|
|
|
}
|
|
|
|
|
2020-04-04 13:06:34 +07:00
|
|
|
if (policy->up_transition_delay_us && policy->down_transition_delay_us) {
|
|
|
|
tunables->up_rate_limit_us = policy->up_transition_delay_us;
|
|
|
|
tunables->down_rate_limit_us = policy->down_transition_delay_us;
|
|
|
|
} else {
|
|
|
|
unsigned int lat;
|
|
|
|
|
|
|
|
tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
|
|
|
|
tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
|
|
|
|
lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
|
|
|
|
if (lat) {
|
|
|
|
tunables->up_rate_limit_us *= lat;
|
|
|
|
tunables->down_rate_limit_us *= lat;
|
|
|
|
}
|
2016-11-17 10:48:45 +05:30
|
|
|
}
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
tunables->fb_legacy = false;
|
|
|
|
sg_policy->be_stochastic = false;
|
|
|
|
#endif
|
2016-04-02 01:09:12 +02:00
|
|
|
|
2017-05-18 22:46:10 -07:00
|
|
|
tunables->iowait_boost_enable = policy->iowait_boost_enable;
|
2020-03-02 20:34:04 -08:00
|
|
|
tunables->exp_util = false;
|
2017-05-18 22:46:10 -07:00
|
|
|
|
2016-04-02 01:09:12 +02:00
|
|
|
policy->governor_data = sg_policy;
|
|
|
|
sg_policy->tunables = tunables;
|
|
|
|
|
2018-05-29 17:23:04 +02:00
|
|
|
sugov_tunables_restore(policy);
|
|
|
|
|
2016-04-02 01:09:12 +02:00
|
|
|
ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
|
|
|
|
get_governor_parent_kobj(policy), "%s",
|
|
|
|
schedutil_gov.name);
|
|
|
|
if (ret)
|
|
|
|
goto fail;
|
|
|
|
|
2017-05-25 15:04:04 +01:00
|
|
|
out:
|
2016-04-02 01:09:12 +02:00
|
|
|
mutex_unlock(&global_tunables_lock);
|
|
|
|
return 0;
|
|
|
|
|
2017-05-25 15:04:04 +01:00
|
|
|
fail:
|
2019-04-30 10:11:44 +10:00
|
|
|
kobject_put(&tunables->attr_set.kobj);
|
2016-04-02 01:09:12 +02:00
|
|
|
policy->governor_data = NULL;
|
|
|
|
sugov_tunables_free(tunables);
|
|
|
|
|
2020-04-04 13:06:34 +07:00
|
|
|
stop_kthread:
|
|
|
|
sugov_kthread_stop(sg_policy);
|
2016-04-02 01:09:12 +02:00
|
|
|
mutex_unlock(&global_tunables_lock);
|
|
|
|
|
2018-03-29 15:43:01 +01:00
|
|
|
free_sg_policy:
|
2016-04-02 01:09:12 +02:00
|
|
|
sugov_policy_free(sg_policy);
|
2017-07-20 16:32:35 +01:00
|
|
|
|
|
|
|
disable_fast_switch:
|
|
|
|
cpufreq_disable_fast_switch(policy);
|
|
|
|
|
2016-05-18 17:55:28 +05:30
|
|
|
pr_err("initialization failed (error %d)\n", ret);
|
2016-04-02 01:09:12 +02:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-06-02 23:24:15 +02:00
|
|
|
static void sugov_exit(struct cpufreq_policy *policy)
|
2016-04-02 01:09:12 +02:00
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy = policy->governor_data;
|
|
|
|
struct sugov_tunables *tunables = sg_policy->tunables;
|
|
|
|
unsigned int count;
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, policy->cpu);
|
|
|
|
#endif
|
2016-04-02 01:09:12 +02:00
|
|
|
|
|
|
|
mutex_lock(&global_tunables_lock);
|
|
|
|
|
|
|
|
count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
|
|
|
|
policy->governor_data = NULL;
|
2018-05-29 17:23:04 +02:00
|
|
|
if (!count) {
|
|
|
|
sugov_tunables_save(policy, tunables);
|
2016-04-02 01:09:12 +02:00
|
|
|
sugov_tunables_free(tunables);
|
2018-05-29 17:23:04 +02:00
|
|
|
}
|
2016-04-02 01:09:12 +02:00
|
|
|
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
if (sg_cpu->util_vessel) {
|
|
|
|
sg_cpu->util_vessel->finalizer(sg_cpu->util_vessel);
|
|
|
|
kair_obj_destructor(sg_cpu->util_vessel);
|
|
|
|
sg_cpu->util_vessel = NULL;
|
|
|
|
}
|
|
|
|
sg_policy->be_stochastic = false;
|
|
|
|
#endif
|
|
|
|
|
2020-04-04 13:06:34 +07:00
|
|
|
sugov_kthread_stop(sg_policy);
|
|
|
|
sugov_policy_free(sg_policy);
|
2017-08-01 19:36:30 +09:00
|
|
|
mutex_unlock(&global_tunables_lock);
|
2020-02-04 13:44:48 +02:00
|
|
|
|
2020-04-04 13:06:34 +07:00
|
|
|
cpufreq_disable_fast_switch(policy);
|
2016-04-02 01:09:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static int sugov_start(struct cpufreq_policy *policy)
|
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy = policy->governor_data;
|
|
|
|
unsigned int cpu;
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
char alias[KAIR_ALIAS_LEN];
|
|
|
|
#endif
|
2016-04-02 01:09:12 +02:00
|
|
|
|
2016-11-17 10:48:45 +05:30
|
|
|
sg_policy->up_rate_delay_ns =
|
|
|
|
sg_policy->tunables->up_rate_limit_us * NSEC_PER_USEC;
|
|
|
|
sg_policy->down_rate_delay_ns =
|
|
|
|
sg_policy->tunables->down_rate_limit_us * NSEC_PER_USEC;
|
|
|
|
update_min_rate_limit_us(sg_policy);
|
2016-04-02 01:09:12 +02:00
|
|
|
sg_policy->last_freq_update_time = 0;
|
2018-05-09 16:05:24 +05:30
|
|
|
sg_policy->next_freq = 0;
|
2016-04-02 01:09:12 +02:00
|
|
|
sg_policy->work_in_progress = false;
|
|
|
|
sg_policy->need_freq_update = false;
|
2017-03-02 14:03:20 +05:30
|
|
|
sg_policy->cached_raw_freq = 0;
|
2019-08-07 12:36:01 +05:30
|
|
|
sg_policy->limits_changed = false;
|
2016-04-02 01:09:12 +02:00
|
|
|
|
|
|
|
for_each_cpu(cpu, policy->cpus) {
|
|
|
|
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
|
|
|
|
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
if (cpu != policy->cpu) {
|
|
|
|
memset(sg_cpu, 0, sizeof(*sg_cpu));
|
|
|
|
goto skip_subcpus;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!sg_policy->be_stochastic) {
|
|
|
|
memset(alias, 0, KAIR_ALIAS_LEN);
|
|
|
|
sprintf(alias, "govern%d", cpu);
|
|
|
|
memset(sg_cpu, 0, sizeof(*sg_cpu));
|
|
|
|
sg_cpu->util_vessel =
|
|
|
|
kair_obj_creator(alias,
|
|
|
|
UTILAVG_KAIR_VARIANCE,
|
|
|
|
policy->cpuinfo.max_freq,
|
|
|
|
policy->cpuinfo.min_freq,
|
|
|
|
&kairistic_cpufreq);
|
|
|
|
if (sg_cpu->util_vessel->initializer(sg_cpu->util_vessel) < 0) {
|
|
|
|
sg_cpu->util_vessel->finalizer(sg_cpu->util_vessel);
|
|
|
|
kair_obj_destructor(sg_cpu->util_vessel);
|
|
|
|
sg_cpu->util_vessel = NULL;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
struct kair_class *vptr = sg_cpu->util_vessel;
|
|
|
|
memset(sg_cpu, 0, sizeof(*sg_cpu));
|
|
|
|
sg_cpu->util_vessel = vptr;
|
|
|
|
}
|
|
|
|
skip_subcpus:
|
|
|
|
#else
|
2017-03-19 14:30:02 +01:00
|
|
|
memset(sg_cpu, 0, sizeof(*sg_cpu));
|
2020-09-18 16:40:46 +03:00
|
|
|
#endif
|
2016-04-02 01:09:12 +02:00
|
|
|
sg_cpu->sg_policy = sg_policy;
|
2018-01-19 16:52:59 +09:00
|
|
|
sg_cpu->flags = 0;
|
2017-11-02 20:47:24 +09:00
|
|
|
sugov_start_slack(cpu);
|
2017-03-19 14:30:02 +01:00
|
|
|
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
|
2017-07-06 10:05:52 -07:00
|
|
|
}
|
|
|
|
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
sg_policy->be_stochastic = true;
|
|
|
|
#endif
|
|
|
|
|
2017-07-06 10:05:52 -07:00
|
|
|
for_each_cpu(cpu, policy->cpus) {
|
|
|
|
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
|
|
|
|
|
2017-03-19 14:30:02 +01:00
|
|
|
cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
|
|
|
|
policy_is_shared(policy) ?
|
|
|
|
sugov_update_shared :
|
|
|
|
sugov_update_single);
|
2016-04-02 01:09:12 +02:00
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2016-06-02 23:24:15 +02:00
|
|
|
static void sugov_stop(struct cpufreq_policy *policy)
|
2016-04-02 01:09:12 +02:00
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy = policy->governor_data;
|
|
|
|
unsigned int cpu;
|
|
|
|
|
2017-11-02 20:47:24 +09:00
|
|
|
for_each_cpu(cpu, policy->cpus) {
|
|
|
|
sugov_stop_slack(cpu);
|
2016-04-02 01:09:12 +02:00
|
|
|
cpufreq_remove_update_util_hook(cpu);
|
2017-11-02 20:47:24 +09:00
|
|
|
}
|
2016-04-02 01:09:12 +02:00
|
|
|
|
|
|
|
synchronize_sched();
|
|
|
|
|
2020-09-18 16:40:46 +03:00
|
|
|
#ifdef CONFIG_SCHED_KAIR_GLUE
|
|
|
|
for_each_cpu(cpu, policy->cpus) {
|
|
|
|
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
|
|
|
|
if (sg_cpu->util_vessel) {
|
|
|
|
sg_cpu->util_vessel->stopper(sg_cpu->util_vessel);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2020-04-04 13:06:34 +07:00
|
|
|
if (!policy->fast_switch_enabled) {
|
|
|
|
irq_work_sync(&sg_policy->irq_work);
|
|
|
|
kthread_cancel_work_sync(&sg_policy->work);
|
|
|
|
}
|
2016-04-02 01:09:12 +02:00
|
|
|
}
|
|
|
|
|
2016-06-02 23:24:15 +02:00
|
|
|
static void sugov_limits(struct cpufreq_policy *policy)
|
2016-04-02 01:09:12 +02:00
|
|
|
{
|
|
|
|
struct sugov_policy *sg_policy = policy->governor_data;
|
|
|
|
|
2017-08-01 19:36:30 +09:00
|
|
|
mutex_lock(&global_tunables_lock);
|
|
|
|
|
|
|
|
if (!sg_policy) {
|
|
|
|
mutex_unlock(&global_tunables_lock);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2016-04-02 01:09:12 +02:00
|
|
|
if (!policy->fast_switch_enabled) {
|
|
|
|
mutex_lock(&sg_policy->work_lock);
|
2016-05-18 17:55:31 +05:30
|
|
|
cpufreq_policy_apply_limits(policy);
|
2016-04-02 01:09:12 +02:00
|
|
|
mutex_unlock(&sg_policy->work_lock);
|
|
|
|
}
|
|
|
|
|
2017-11-02 20:47:24 +09:00
|
|
|
sugov_update_min(policy);
|
|
|
|
|
2019-08-07 12:36:01 +05:30
|
|
|
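/* Force the next scheduler-driven update to re-evaluate the frequency against the new limits. */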
sg_policy->limits_changed = true;
|
2017-08-01 19:36:30 +09:00
|
|
|
|
|
|
|
mutex_unlock(&global_tunables_lock);
|
2016-04-02 01:09:12 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct cpufreq_governor schedutil_gov = {
|
|
|
|
.name = "schedutil",
|
|
|
|
.owner = THIS_MODULE,
|
2016-06-02 23:24:15 +02:00
|
|
|
.init = sugov_init,
|
|
|
|
.exit = sugov_exit,
|
|
|
|
.start = sugov_start,
|
|
|
|
.stop = sugov_stop,
|
|
|
|
.limits = sugov_limits,
|
2016-04-02 01:09:12 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
|
|
|
|
struct cpufreq_governor *cpufreq_default_governor(void)
|
|
|
|
{
|
|
|
|
return &schedutil_gov;
|
|
|
|
}
|
|
|
|
#endif
|
2016-08-16 22:14:55 +02:00
|
|
|
|
2017-11-02 20:47:24 +09:00
|
|
|
static void sugov_update_min(struct cpufreq_policy *policy)
|
|
|
|
{
|
|
|
|
int cpu, max_cap;
|
|
|
|
struct sugov_exynos *sg_exynos;
|
|
|
|
int min_cap;
|
|
|
|
|
|
|
|
max_cap = arch_scale_cpu_capacity(NULL, policy->cpu);
|
|
|
|
|
|
|
|
/* min_cap is the minimum utilization that results in a frequency higher than policy->min */
|
|
|
|
min_cap = max_cap * policy->min / policy->max;
|
|
|
|
min_cap = (min_cap * 4 / 5) + 1;
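/* The 4/5 factor likely compensates for schedutil's ~1.25x frequency headroom, so a CPU at min_cap utilization maps just above policy->min; the +1 makes the threshold strictly greater. */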
|
|
|
|
|
|
|
|
for_each_cpu(cpu, policy->cpus) {
|
|
|
|
sg_exynos = &per_cpu(sugov_exynos, cpu);
|
|
|
|
sg_exynos->min = min_cap;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sugov_nop_timer(unsigned long data)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* The purpose of slack-timer is to wake up the CPU from IDLE, in order
|
|
|
|
* to decrease its frequency if it is not set to minimum already.
|
|
|
|
*
|
|
|
|
* This is important for platforms where CPUs at higher frequencies
|
|
|
|
* consume higher power even at IDLE.
|
|
|
|
*/
|
|
|
|
trace_sugov_slack_func(smp_processor_id());
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sugov_start_slack(int cpu)
|
|
|
|
{
|
|
|
|
struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
|
|
|
|
|
|
|
|
if (!sg_exynos->enabled)
|
|
|
|
return;
|
|
|
|
|
|
|
|
sg_exynos->min = ULONG_MAX;
|
|
|
|
sg_exynos->started = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sugov_stop_slack(int cpu)
|
|
|
|
{
|
|
|
|
struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
|
|
|
|
|
|
|
|
sg_exynos->started = false;
|
|
|
|
if (timer_pending(&sg_exynos->timer))
|
|
|
|
del_timer_sync(&sg_exynos->timer);
|
|
|
|
}
|
|
|
|
|
|
|
|
static s64 get_next_event_time_ms(void)
|
|
|
|
{
|
|
|
|
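	/* Note: despite the _ms suffix, ktime_to_us() makes this return microseconds; callers compare it against expired_time, which is given in milliseconds. */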
return ktime_to_us(tick_nohz_get_sleep_length());
|
|
|
|
}
|
|
|
|
|
|
|
|
static int sugov_need_slack_timer(unsigned int cpu)
|
|
|
|
{
|
|
|
|
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
|
|
|
|
struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
|
|
|
|
|
|
|
|
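	/* Keep a slack timer only if utilization would hold the frequency above policy->min and the CPU is expected to stay idle past expired_time. */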
if (sg_cpu->util > sg_exynos->min &&
|
|
|
|
get_next_event_time_ms() > sg_exynos->expired_time)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int sugov_pm_notifier(struct notifier_block *self,
|
|
|
|
unsigned long action, void *v)
|
|
|
|
{
|
|
|
|
unsigned int cpu = raw_smp_processor_id();
|
|
|
|
struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
|
|
|
|
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
|
|
|
|
struct timer_list *timer = &sg_exynos->timer;
|
|
|
|
|
|
|
|
if (!sg_exynos->started)
|
|
|
|
return NOTIFY_OK;
|
|
|
|
|
|
|
|
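	/* On idle entry, arm the slack timer if this CPU would otherwise park at an elevated frequency; cancel it again once it is no longer needed or the CPU wakes before it fires. */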
switch (action) {
|
|
|
|
case CPU_PM_ENTER_PREPARE:
|
|
|
|
if (timer_pending(timer))
|
|
|
|
del_timer_sync(timer);
|
|
|
|
|
|
|
|
if (sugov_need_slack_timer(cpu)) {
|
|
|
|
timer->expires = jiffies + msecs_to_jiffies(sg_exynos->expired_time);
|
|
|
|
add_timer_on(timer, cpu);
|
|
|
|
trace_sugov_slack(cpu, sg_cpu->util, sg_exynos->min, action, 1);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CPU_PM_ENTER:
|
|
|
|
if (timer_pending(timer) && !sugov_need_slack_timer(cpu)) {
|
|
|
|
del_timer_sync(timer);
|
|
|
|
trace_sugov_slack(cpu, sg_cpu->util, sg_exynos->min, action, -1);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
case CPU_PM_EXIT_POST:
|
|
|
|
if (timer_pending(timer) && (time_after(timer->expires, jiffies))) {
|
|
|
|
del_timer_sync(timer);
|
|
|
|
trace_sugov_slack(cpu, sg_cpu->util, sg_exynos->min, action, -1);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NOTIFY_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct notifier_block sugov_pm_nb = {
|
|
|
|
.notifier_call = sugov_pm_notifier,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int find_cpu_pm_qos_class(int pm_qos_class)
|
|
|
|
{
|
|
|
|
int cpu;
|
|
|
|
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
|
|
struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
|
|
|
|
|
|
|
|
if ((sg_exynos->qos_min_class == pm_qos_class) &&
|
|
|
|
cpumask_test_cpu(cpu, cpu_active_mask))
|
|
|
|
return cpu;
|
|
|
|
}
|
|
|
|
|
|
|
|
pr_err("cannot find cpu of PM QoS class\n");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int sugov_pm_qos_callback(struct notifier_block *nb,
|
|
|
|
unsigned long val, void *v)
|
|
|
|
{
|
|
|
|
struct sugov_cpu *sg_cpu;
|
|
|
|
struct cpufreq_policy *policy;
|
|
|
|
int pm_qos_class = *((int *)v);
|
|
|
|
unsigned int next_freq;
|
|
|
|
int cpu;
|
|
|
|
|
|
|
|
cpu = find_cpu_pm_qos_class(pm_qos_class);
|
|
|
|
if (cpu < 0)
|
|
|
|
return NOTIFY_BAD;
|
|
|
|
|
|
|
|
sg_cpu = &per_cpu(sugov_cpu, cpu);
|
|
|
|
if (!sg_cpu || !sg_cpu->sg_policy || !sg_cpu->sg_policy->policy)
|
|
|
|
return NOTIFY_BAD;
|
|
|
|
|
|
|
|
next_freq = sg_cpu->sg_policy->next_freq;
|
|
|
|
|
|
|
|
policy = cpufreq_cpu_get(cpu);
|
|
|
|
if (!policy)
|
|
|
|
return NOTIFY_BAD;
|
|
|
|
|
|
|
|
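	/* Bail out while the new QoS minimum still sits at or above the current frequency; otherwise re-apply the governor's most recent request right away instead of waiting for the next scheduler update. */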
if (val >= policy->cur) {
|
|
|
|
cpufreq_cpu_put(policy);
|
|
|
|
return NOTIFY_BAD;
|
|
|
|
}
|
|
|
|
|
|
|
|
__cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_L);
|
|
|
|
|
|
|
|
cpufreq_cpu_put(policy);
|
|
|
|
|
|
|
|
return NOTIFY_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct notifier_block sugov_min_qos_notifier = {
|
|
|
|
.notifier_call = sugov_pm_qos_callback,
|
|
|
|
.priority = INT_MIN,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int __init sugov_parse_dt(struct device_node *dn, int cpu)
|
|
|
|
{
|
|
|
|
struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
|
|
|
|
|
|
|
|
/* parsing slack info */
|
|
|
|
if (of_property_read_u32(dn, "enabled", &sg_exynos->enabled))
|
|
|
|
return -EINVAL;
|
|
|
|
if (sg_exynos->enabled)
|
|
|
|
if (of_property_read_u32(dn, "expired_time", &sg_exynos->expired_time))
|
|
|
|
sg_exynos->expired_time = DEFAULT_EXPIRED_TIME;
|
|
|
|
|
|
|
|
/* parsing pm_qos_class info */
|
|
|
|
if (of_property_read_u32(dn, "qos_min_class", &sg_exynos->qos_min_class))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __init sugov_exynos_init(void)
|
|
|
|
{
|
|
|
|
int cpu, ret;
|
|
|
|
struct device_node *dn = NULL;
|
|
|
|
const char *buf;
|
|
|
|
|
|
|
|
while ((dn = of_find_node_by_type(dn, "schedutil-domain"))) {
|
|
|
|
struct cpumask shared_mask;
|
|
|
|
/* Get shared cpus */
|
|
|
|
ret = of_property_read_string(dn, "shared-cpus", &buf);
|
|
|
|
if (ret)
|
|
|
|
goto exit;
|
|
|
|
|
|
|
|
cpulist_parse(buf, &shared_mask);
|
|
|
|
for_each_cpu(cpu, &shared_mask)
|
|
|
|
if (sugov_parse_dt(dn, cpu))
|
|
|
|
goto exit;
|
|
|
|
}
|
|
|
|
|
|
|
|
for_each_possible_cpu(cpu) {
|
|
|
|
struct sugov_exynos *sg_exynos = &per_cpu(sugov_exynos, cpu);
|
|
|
|
|
|
|
|
if (!sg_exynos->enabled)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Initialize slack-timer */
|
|
|
|
init_timer_pinned(&sg_exynos->timer);
|
|
|
|
sg_exynos->timer.function = sugov_nop_timer;
|
|
|
|
}
|
|
|
|
|
|
|
|
pm_qos_add_notifier(PM_QOS_CLUSTER0_FREQ_MIN, &sugov_min_qos_notifier);
|
|
|
|
pm_qos_add_notifier(PM_QOS_CLUSTER1_FREQ_MIN, &sugov_min_qos_notifier);
|
|
|
|
cpu_pm_register_notifier(&sugov_pm_nb);
|
|
|
|
|
|
|
|
return;
|
|
|
|
exit:
|
|
|
|
pr_info("%s: failed to initialized slack_timer, pm_qos handler\n", __func__);
|
|
|
|
}
|
|
|
|
|
2016-08-16 22:14:55 +02:00
|
|
|
static int __init sugov_register(void)
|
|
|
|
{
|
2017-11-02 20:47:24 +09:00
|
|
|
sugov_exynos_init();
|
|
|
|
|
2016-08-16 22:14:55 +02:00
|
|
|
return cpufreq_register_governor(&schedutil_gov);
|
|
|
|
}
|
|
|
|
fs_initcall(sugov_register);
|