/*
 * Copyright (c) 2017 Samsung Electronics Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/plist.h>
#include <linux/sched.h>
#ifdef CONFIG_SCHED_TUNE
|
|
|
|
enum stune_group {
|
|
|
|
STUNE_ROOT,
|
|
|
|
STUNE_FOREGROUND,
|
|
|
|
STUNE_BACKGROUND,
|
|
|
|
STUNE_TOPAPP,
|
|
|
|
STUNE_GROUP_COUNT,
|
|
|
|
};
|
|
|
|
#endif
/*
 * A single global-boost QoS request, handed to gb_qos_update_request()
 * (declared below).  Requests are linked on a priority-sorted plist
 * node so the winning request value can be located efficiently.
 */
struct gb_qos_request {
	struct plist_node node;	/* linkage into the request plist */
	char *name;		/* requester identifier (owned by the caller) */
	bool active;		/* whether this request is currently queued */
};
#ifdef CONFIG_SCHED_EMS
/*
 * EMS (Exynos Mobile Scheduler) hooks, implemented by the platform
 * scheduler code when CONFIG_SCHED_EMS is enabled.  No-op inline stubs
 * for the disabled case are provided in the #else branch below; the two
 * sets of prototypes must be kept in sync.
 */

/* Idle-state / idlest-group estimation used during CPU selection. */
extern int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
				      int state, int cpus);
extern struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
						   struct task_struct *p);
extern void exynos_init_entity_util_avg(struct sched_entity *se);
extern int exynos_need_active_balance(enum cpu_idle_type idle,
				      struct sched_domain *sd, int src_cpu, int dst_cpu);

/* Global boost value and capacity queries. */
extern unsigned long global_boost(void);
extern int find_second_max_cap(void);

/* Task placement: returns chosen CPU, fills *backup_cpu. */
extern int exynos_select_cpu(struct task_struct *p, int *backup_cpu,
			     bool boosted, bool prefer_idle);

/* On-time migration: heavy-task tracking and migration control. */
extern void ontime_migration(void);
extern int ontime_can_migration(struct task_struct *p, int cpu);
extern void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
				   struct sched_avg *sa);
extern void ontime_new_entity_load(struct task_struct *parent,
				   struct sched_entity *se);
extern void ontime_trace_task_info(struct task_struct *p);

extern void ehmp_update_max_cpu_capacity(int cpu, unsigned long val);

/* Load-balance-trigger overutilization tracking. */
extern bool lbt_overutilized(int cpu, int level);
extern void update_lbt_overutil(int cpu, unsigned long capacity);

/* Global-boost QoS (see struct gb_qos_request above). */
extern void gb_qos_update_request(struct gb_qos_request *req, u32 new_value);

extern void request_kernel_prefer_perf(int grp_idx, int enable);

/* Register a frequency/voltage table for the energy model. */
extern void init_sched_energy_table(struct cpumask *cpus, int table_size,
				    unsigned long *f_table, unsigned int *v_table,
				    int max_f, int min_f);

#else
|
2018-03-14 13:46:14 +09:00
|
|
|
static inline int exynos_estimate_idle_state(int cpu_idx, struct cpumask *mask,
|
|
|
|
int state, int cpus) { return 0; }
|
2018-03-13 20:35:22 +09:00
|
|
|
static inline struct sched_group *exynos_fit_idlest_group(struct sched_domain *sd,
|
|
|
|
struct task_struct *p) { return NULL; }
|
2018-01-16 19:01:05 +09:00
|
|
|
static inline void exynos_init_entity_util_avg(struct sched_entity *se) { }
|
|
|
|
static inline int exynos_need_active_balance(enum cpu_idle_type idle,
|
|
|
|
struct sched_domain *sd, int src_cpu, int dst_cpu) { return 0; }
|
|
|
|
|
|
|
|
static inline unsigned long global_boost(void) { return 0; }
|
|
|
|
static inline int find_second_max_cap(void) { return -EINVAL; }
|
|
|
|
|
2018-03-22 14:43:33 +09:00
|
|
|
static inline int exynos_select_cpu(struct task_struct *p, int *backup_cpu,
|
|
|
|
bool boosted, bool prefer_idle) { return -EINVAL; }
|
2018-01-16 19:01:05 +09:00
|
|
|
|
|
|
|
static inline void ontime_migration(void) { }
|
|
|
|
static inline int ontime_can_migration(struct task_struct *p, int cpu) { return 1; }
|
|
|
|
static inline void ontime_update_load_avg(u64 delta, int cpu, unsigned long weight,
|
|
|
|
struct sched_avg *sa) { }
|
|
|
|
static inline void ontime_new_entity_load(struct task_struct *p,
|
|
|
|
struct sched_entity *se) { }
|
|
|
|
static inline void ontime_trace_task_info(struct task_struct *p) { }
|
|
|
|
|
|
|
|
static inline void ehmp_update_max_cpu_capacity(int cpu, unsigned long val) { }
|
|
|
|
|
2018-03-05 17:41:06 +09:00
|
|
|
static inline bool lbt_overutilized(int cpu, int level) { return false; }
|
|
|
|
static inline void update_lbt_overutil(int cpu, unsigned long capacity) { }
|
2018-01-16 19:01:05 +09:00
|
|
|
|
|
|
|
static inline void gb_qos_update_request(struct gb_qos_request *req, u32 new_value) { }
|
|
|
|
|
2018-03-22 14:43:33 +09:00
|
|
|
static inline void request_kernel_prefer_perf(int grp_idx, int enable) { }
|
2018-04-05 20:01:19 +09:00
|
|
|
|
|
|
|
static inline void init_sched_energy_table(struct cpumask *cpus, int table_size,
|
|
|
|
unsigned long *f_table, unsigned int *v_table,
|
|
|
|
int max_f, int min_f) { }
|
2018-03-22 14:43:33 +09:00
|
|
|
#endif /* CONFIG_SCHED_EMS */
|