/*
 * Copyright (c) 2017 Samsung Electronics Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/plist.h>
#include <linux/sched.h>

struct gb_qos_request {
        struct plist_node node;
        char *name;
        bool active;
};
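
/*
 * Illustrative usage of the global boost QoS interface declared later in
 * this header (gb_qos_update_request()): a client keeps a gb_qos_request
 * alive and raises/drops its requested boost value. The requester name,
 * the value 100 and the call sites below are hypothetical, not taken from
 * this header; how active requests are aggregated into one boost value is
 * defined by the EMS core.
 *
 *      static struct gb_qos_request tb_req = {
 *              .name = "touch_booster",
 *      };
 *
 *      void touch_booster_press(void)
 *      {
 *              gb_qos_update_request(&tb_req, 100);
 *      }
 *
 *      void touch_booster_release(void)
 *      {
 *              gb_qos_update_request(&tb_req, 0);
 *      }
 */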

#define LEAVE_BAND 0

struct task_band {
        int id;
        int sse;
        pid_t tgid;
        raw_spinlock_t lock;

        struct list_head members;
        int member_count;
        struct cpumask playable_cpus;

        unsigned long util;
        unsigned long last_update_time;
};
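
/*
 * A task_band appears to tie together tasks sharing a tgid and track
 * their combined utilization; 'members' is walked under 'lock'. Minimal
 * sketch of a consumer, assuming each member task is linked into
 * 'members' through a list node (the 'band_members' field and the
 * task_util() helper used here are placeholders, not defined by this
 * header):
 *
 *      unsigned long band_util_sum(struct task_band *band)
 *      {
 *              struct task_struct *t;
 *              unsigned long util = 0;
 *
 *              raw_spin_lock(&band->lock);
 *              list_for_each_entry(t, &band->members, band_members)
 *                      util += task_util(t);
 *              raw_spin_unlock(&band->lock);
 *
 *              return util;
 *      }
 */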

struct rq;

extern struct kobject *ems_kobj;
extern unsigned int get_cpu_max_capacity(unsigned int cpu, int sse);
#ifdef CONFIG_SCHED_EMS
/* core */
extern void init_ems(void);

extern struct sched_group *lb_fit_idlest_group(struct sched_domain *sd,
                                                struct task_struct *p);

/* task util initialization */
extern void exynos_init_entity_util_avg(struct sched_entity *se);

/* wakeup balance */
extern int
exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int sync);

/* ontime migration */
extern void ontime_migration(void);
extern int ontime_can_migration(struct task_struct *p, int cpu);
extern void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight, struct sched_avg *sa);
extern void ontime_new_entity_load(struct task_struct *parent, struct sched_entity *se);
extern void ontime_trace_task_info(struct task_struct *p);

/* load balance trigger */
extern bool lbt_overutilized(int cpu, int level);
extern void update_lbt_overutil(int cpu, unsigned long capacity);

/* global boost */
extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value);

/* task band */
extern void sync_band(struct task_struct *p, bool join);
extern void newbie_join_band(struct task_struct *newbie);
extern void update_band(struct task_struct *p, long old_util);
extern int band_playing(struct task_struct *p, int cpu);

/* multi load */
void update_multi_load(u64 delta, int cpu, struct sched_avg *sa,
                unsigned long weight, int running, struct cfs_rq *cfs_rq);
void init_multi_load(struct sched_entity *se);
void detach_entity_multi_load(struct cfs_rq *cfs_rq, struct sched_entity *se);
void attach_entity_multi_load(struct cfs_rq *cfs_rq, struct sched_entity *se);
void remove_entity_multi_load(struct cfs_rq *cfs_rq, struct sched_entity *se);
void apply_removed_multi_load(struct cfs_rq *cfs_rq);
void update_tg_multi_load(struct cfs_rq *cfs_rq, struct sched_entity *se);
void cfs_se_util_change_multi_load(struct task_struct *p, struct sched_avg *avg);
void enqueue_multi_load(struct cfs_rq *cfs_rq, struct task_struct *p);
void dequeue_multi_load(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep);

/* P.A.R.T */
void update_cpu_active_ratio(struct rq *rq, struct task_struct *p, int type);
void part_cpu_active_ratio(unsigned long *util, unsigned long *max, int cpu);
void set_part_period_start(struct rq *rq);

/* load balance */
extern void lb_add_cfs_task(struct rq *rq, struct sched_entity *se);
extern int lb_check_priority(int src_cpu, int dst_cpu);
extern struct list_head *lb_prefer_cfs_tasks(int src_cpu, int dst_cpu);
extern int lb_need_active_balance(enum cpu_idle_type idle,
                                struct sched_domain *sd, int src_cpu, int dst_cpu);

/* check the status of energy table */
extern bool energy_initialized;
extern void set_energy_table_status(bool status);
extern bool get_energy_table_status(void);
#else
static inline struct sched_group *lb_fit_idlest_group(struct sched_domain *sd,
                                                struct task_struct *p) { return NULL; }
static inline void init_ems(void) { }
static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }

static inline int
exynos_wakeup_balance(struct task_struct *p, int prev_cpu, int sd_flag, int sync)
{
        return -1;
}

static inline void ontime_migration(void) { }
static inline int ontime_can_migration(struct task_struct *p, int cpu)
{
        return 1;
}
static inline void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight, struct sched_avg *sa) { }
static inline void ontime_new_entity_load(struct task_struct *p, struct sched_entity *se) { }
static inline void ontime_trace_task_info(struct task_struct *p) { }

static inline bool lbt_overutilized(int cpu, int level)
{
        return false;
}
static inline void update_lbt_overutil(int cpu, unsigned long capacity) { }

static inline void gb_qos_update_request(struct gb_qos_request *req, u32 new_value) { }

static inline void sync_band(struct task_struct *p, bool join) { }
static inline void newbie_join_band(struct task_struct *newbie) { }
static inline void update_band(struct task_struct *p, long old_util) { }
static inline int band_playing(struct task_struct *p, int cpu)
{
        return 0;
}

static inline void update_multi_load(u64 delta, int cpu, struct sched_avg *sa,
                unsigned long weight, int running, struct cfs_rq *cfs_rq) { }
static inline void init_multi_load(struct sched_entity *se) { }
static inline void detach_entity_multi_load(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void attach_entity_multi_load(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void remove_entity_multi_load(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void apply_removed_multi_load(struct cfs_rq *cfs_rq) { }
static inline void update_tg_multi_load(struct cfs_rq *cfs_rq, struct sched_entity *se) { }
static inline void cfs_se_util_change_multi_load(struct task_struct *p, struct sched_avg *avg) { }
static inline void enqueue_multi_load(struct cfs_rq *cfs_rq, struct task_struct *p) { }
static inline void dequeue_multi_load(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep) { }

/* P.A.R.T */
static inline void update_cpu_active_ratio(struct rq *rq, struct task_struct *p, int type) { }
static inline void part_cpu_active_ratio(unsigned long *util, unsigned long *max, int cpu) { }
static inline void set_part_period_start(struct rq *rq) { }

static inline void lb_add_cfs_task(struct rq *rq, struct sched_entity *se) { }
static inline int lb_check_priority(int src_cpu, int dst_cpu)
{
        return 0;
}
static inline struct list_head *lb_prefer_cfs_tasks(int src_cpu, int dst_cpu)
{
        return NULL;
}
static inline int lb_need_active_balance(enum cpu_idle_type idle,
                                struct sched_domain *sd, int src_cpu, int dst_cpu)
{
        return 0;
}
static inline void set_energy_table_status(bool status) { }
static inline bool get_energy_table_status(void)
{
        return false;
}
#endif /* CONFIG_SCHED_EMS */

#ifdef CONFIG_SIMPLIFIED_ENERGY_MODEL
extern void init_sched_energy_table(struct cpumask *cpus, int table_size,
                                unsigned long *f_table, unsigned int *v_table,
                                int max_f, int min_f);
extern void update_qos_capacity(int cpu, unsigned long freq, unsigned long max);
#else
static inline void init_sched_energy_table(struct cpumask *cpus, int table_size,
                                unsigned long *f_table, unsigned int *v_table,
                                int max_f, int min_f) { }
static inline void update_qos_capacity(int cpu, unsigned long freq, unsigned long max) { }
#endif
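
/*
 * Illustrative only: a platform init path might hand one frequency
 * domain's OPP table to init_sched_energy_table(). The cpumask (covering
 * the CPUs of one frequency domain), the frequency/voltage values and the
 * kHz/uV units below are assumptions made for this sketch, not
 * requirements stated by this header:
 *
 *      static unsigned long f_table[] = {  546000,  754000, 1053000, 1248000 };
 *      static unsigned int  v_table[] = {  575000,  612500,  675000,  725000 };
 *      static struct cpumask cluster_cpus;
 *
 *      init_sched_energy_table(&cluster_cpus, ARRAY_SIZE(f_table),
 *                              f_table, v_table, 1248000, 546000);
 */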

/* Fluid Real Time */
extern unsigned int frt_disable_cpufreq;

/*
 * Maximum number of boost groups to support.
 * When per-task boosting is used, we still allow only a limited number of
 * boost groups for two main reasons:
 * 1. on a real system we usually have only a few classes of workloads which
 *    make sense to boost with different values (e.g. background vs foreground
 *    tasks, interactive vs low-priority tasks)
 * 2. a limited number allows for a simpler and more memory/time efficient
 *    implementation, especially for the computation of the per-CPU boost
 *    value
 */
#define BOOSTGROUPS_COUNT 5

struct boost_groups {
        /* Maximum boost value for all RUNNABLE tasks on a CPU */
        int boost_max;
        struct {
                /* True when this boost group maps an actual cgroup */
                bool valid;
                /* The boost for tasks on that boost group */
                int boost;
                /* Count of RUNNABLE tasks on that boost group */
                unsigned tasks;
        } group[BOOSTGROUPS_COUNT];
        /* CPU's boost group locking */
        raw_spinlock_t lock;
};
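
/*
 * Sketch (not the EMS implementation) of how boost_max follows from the
 * fields above: the maximum 'boost' among valid groups that currently
 * have RUNNABLE tasks, recomputed under the per-CPU 'lock' whenever a
 * group's boost value or task count changes. The function name below is
 * hypothetical, and negative per-group boosts are clamped to zero here.
 *
 *      static void bg_update_boost_max(struct boost_groups *bg)
 *      {
 *              int idx, boost_max = 0;
 *
 *              for (idx = 0; idx < BOOSTGROUPS_COUNT; idx++) {
 *                      if (!bg->group[idx].valid || !bg->group[idx].tasks)
 *                              continue;
 *                      boost_max = max(boost_max, bg->group[idx].boost);
 *              }
 *              bg->boost_max = boost_max;
 *      }
 */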