Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip into next
Pull scheduler updates from Ingo Molnar:
 "The main scheduling related changes in this cycle were:

   - various sched/numa updates, for better performance

   - tree wide cleanup of open coded nice levels

   - nohz fix related to rq->nr_running use

   - cpuidle changes and continued consolidation to improve the
     kernel/sched/idle.c high level idle scheduling logic. As part of
     this effort I pulled cpuidle driver changes from Rafael as well.

   - standardized idle polling amongst architectures

   - continued work on preparing better power/energy aware scheduling

   - sched/rt updates

   - misc fixlets and cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (49 commits)
  sched/numa: Decay ->wakee_flips instead of zeroing
  sched/numa: Update migrate_improves/degrades_locality()
  sched/numa: Allow task switch if load imbalance improves
  sched/rt: Fix 'struct sched_dl_entity' and dl_task_time() comments, to match the current upstream code
  sched: Consolidate open coded implementations of nice level frobbing into nice_to_rlimit() and rlimit_to_nice()
  sched: Initialize rq->age_stamp on processor start
  sched, nohz: Change rq->nr_running to always use wrappers
  sched: Fix the rq->next_balance logic in rebalance_domains() and idle_balance()
  sched: Use clamp() and clamp_val() to make sys_nice() more readable
  sched: Do not zero sg->cpumask and sg->sgp->power in build_sched_groups()
  sched/numa: Fix initialization of sched_domain_topology for NUMA
  sched: Call select_idle_sibling() when not affine_sd
  sched: Simplify return logic in sched_read_attr()
  sched: Simplify return logic in sched_copy_attr()
  sched: Fix exec_start/task_hot on migrated tasks
  arm64: Remove TIF_POLLING_NRFLAG
  metag: Remove TIF_POLLING_NRFLAG
  sched/idle: Make cpuidle_idle_call() void
  sched/idle: Reflow cpuidle_idle_call()
  sched/idle: Delay clearing the polling bit
  ...
commit c84a1e32ee
48 changed files with 761 additions and 664 deletions
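The "tree wide cleanup of open coded nice levels" mentioned above centralizes the conversion between nice values and the RLIMIT_NICE range in two helpers, nice_to_rlimit() and rlimit_to_nice(). A minimal user-space sketch of the arithmetic those helpers encapsulate (RLIMIT_NICE counts from 1 to 40, where 40 corresponds to nice -20 and 1 to nice 19; this is an illustration, not the kernel code):

#include <stdio.h>

#define MIN_NICE (-20)
#define MAX_NICE   19

/* nice value [-20, 19] -> RLIMIT_NICE units [40, 1] */
static long nice_to_rlimit(long nice)
{
	return MAX_NICE - nice + 1;
}

/* RLIMIT_NICE units [40, 1] -> nice value [-20, 19] */
static long rlimit_to_nice(long prio)
{
	return MAX_NICE - prio + 1;
}

int main(void)
{
	printf("nice -20 -> rlimit %ld\n", nice_to_rlimit(MIN_NICE)); /* 40 */
	printf("rlimit 1 -> nice %ld\n", rlimit_to_nice(1));          /* 19 */
	return 0;
}

The related "Use clamp() and clamp_val()" commit in the series applies the same spirit to sys_nice(), clamping the increment and the resulting nice value into their valid ranges instead of open coding the comparisons.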
include/linux/sched.h

@@ -870,6 +870,7 @@ enum cpu_idle_type {
 #define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
 #define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
+#define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
@@ -877,7 +878,26 @@ enum cpu_idle_type {
 #define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 #define SD_NUMA			0x4000	/* cross-node balancing */

-extern int __weak arch_sd_sibiling_asym_packing(void);
+#ifdef CONFIG_SCHED_SMT
+static inline const int cpu_smt_flags(void)
+{
+	return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+}
+#endif
+
+#ifdef CONFIG_SCHED_MC
+static inline const int cpu_core_flags(void)
+{
+	return SD_SHARE_PKG_RESOURCES;
+}
+#endif
+
+#ifdef CONFIG_NUMA
+static inline const int cpu_numa_flags(void)
+{
+	return SD_NUMA;
+}
+#endif

 struct sched_domain_attr {
 	int relax_domain_level;
@@ -985,6 +1005,38 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

 bool cpus_share_cache(int this_cpu, int that_cpu);

+typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+typedef const int (*sched_domain_flags_f)(void);
+
+#define SDTL_OVERLAP		0x01
+
+struct sd_data {
+	struct sched_domain **__percpu sd;
+	struct sched_group **__percpu sg;
+	struct sched_group_power **__percpu sgp;
+};
+
+struct sched_domain_topology_level {
+	sched_domain_mask_f mask;
+	sched_domain_flags_f sd_flags;
+	int		    flags;
+	int		    numa_level;
+	struct sd_data      data;
+#ifdef CONFIG_SCHED_DEBUG
+	char                *name;
+#endif
+};
+
+extern struct sched_domain_topology_level *sched_domain_topology;
+
+extern void set_sched_topology(struct sched_domain_topology_level *tl);
+
+#ifdef CONFIG_SCHED_DEBUG
+# define SD_INIT_NAME(type)		.name = #type
+#else
+# define SD_INIT_NAME(type)
+#endif
+
 #else /* CONFIG_SMP */

 struct sched_domain_attr;
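The additions above make the sched_domain hierarchy table driven: each sched_domain_topology_level supplies a cpumask accessor and an optional sd_flags() callback, and an architecture installs its table with set_sched_topology(). A sketch of how a default table would wire in the cpu_*_flags() helpers from the earlier hunk (reconstructed for illustration; the table name and the init hook below are not part of this diff):

static struct sched_domain_topology_level default_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },			/* a NULL mask terminates the table */
};

/* hypothetical arch hook: replace the default table with a custom one */
static void __init arch_setup_sched_topology(void)
{
	set_sched_topology(default_topology);
}

Levels without an sd_flags callback (like DIE above) simply contribute no extra SD_SHARE_* flags; in this cycle these tables replace the old per-architecture SD_*_INIT initializer macros.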
@@ -1123,8 +1175,8 @@ struct sched_dl_entity {

 	/*
 	 * Original scheduling parameters. Copied here from sched_attr
-	 * during sched_setscheduler2(), they will remain the same until
-	 * the next sched_setscheduler2().
+	 * during sched_setattr(), they will remain the same until
+	 * the next sched_setattr().
 	 */
 	u64 dl_runtime;		/* maximum runtime for each instance	*/
 	u64 dl_deadline;	/* relative deadline of each instance	*/
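The corrected comment points at sched_setattr(2), the syscall that copies these parameters from a struct sched_attr into the task's sched_dl_entity. A minimal user-space sketch of handing SCHED_DEADLINE parameters to the kernel (the struct layout follows the uapi definition of that era; glibc had no wrapper, so the raw syscall is used, and SYS_sched_setattr assumes new enough kernel headers):

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define SCHED_DEADLINE	6

struct sched_attr {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;		/* SCHED_NORMAL, SCHED_BATCH */
	uint32_t sched_priority;	/* SCHED_FIFO, SCHED_RR */
	uint64_t sched_runtime;		/* SCHED_DEADLINE, in nanoseconds */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

int main(void)
{
	struct sched_attr attr = {
		.size           = sizeof(attr),
		.sched_policy   = SCHED_DEADLINE,
		.sched_runtime  = 10 * 1000 * 1000,	/* 10 ms of runtime... */
		.sched_deadline = 30 * 1000 * 1000,	/* ...within a 30 ms deadline */
		.sched_period   = 30 * 1000 * 1000,	/* ...every 30 ms */
	};

	/* pid 0 means the calling task; the flags argument must be 0 */
	if (syscall(SYS_sched_setattr, 0, &attr, 0) != 0)
		perror("sched_setattr");
	return 0;
}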
@@ -2723,51 +2775,9 @@ static inline int spin_needbreak(spinlock_t *lock)

 /*
  * Idle thread specific functions to determine the need_resched
- * polling state. We have two versions, one based on TS_POLLING in
- * thread_info.status and one based on TIF_POLLING_NRFLAG in
- * thread_info.flags
+ * polling state.
  */
-#ifdef TS_POLLING
-static inline int tsk_is_polling(struct task_struct *p)
-{
-	return task_thread_info(p)->status & TS_POLLING;
-}
-static inline void __current_set_polling(void)
-{
-	current_thread_info()->status |= TS_POLLING;
-}
-
-static inline bool __must_check current_set_polling_and_test(void)
-{
-	__current_set_polling();
-
-	/*
-	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
-	 */
-	smp_mb();
-
-	return unlikely(tif_need_resched());
-}
-
-static inline void __current_clr_polling(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-}
-
-static inline bool __must_check current_clr_polling_and_test(void)
-{
-	__current_clr_polling();
-
-	/*
-	 * Polling state must be visible before we test NEED_RESCHED,
-	 * paired by resched_task()
-	 */
-	smp_mb();
-
-	return unlikely(tif_need_resched());
-}
-#elif defined(TIF_POLLING_NRFLAG)
+#ifdef TIF_POLLING_NRFLAG
 static inline int tsk_is_polling(struct task_struct *p)
 {
 	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
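The "paired by resched_task()" comments in the block removed above describe a Dekker-style protocol: the idle side updates its polling flag, issues a full barrier, then tests NEED_RESCHED, while the waking side sets NEED_RESCHED, issues a full barrier, then tests the polling flag. At least one of the two loads must observe the other side's store, which rules out the lost-wakeup interleaving where the CPU stops polling and drops into deep idle while the waker concludes no IPI is needed. A user-space analogue of the current_clr_polling_and_test()/resched_task() pairing with C11 atomics (names are illustrative, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int polling = 1;	/* the CPU starts in a polling idle loop */
static atomic_int need_resched;

/* models current_clr_polling_and_test() before entering deep idle */
static void *idle_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&polling, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (!atomic_load_explicit(&need_resched, memory_order_relaxed))
		puts("idle: entering deep (non-polling) idle");
	return NULL;
}

/* models resched_task() on the waking CPU */
static void *waker_side(void *arg)
{
	(void)arg;
	atomic_store_explicit(&need_resched, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load_explicit(&polling, memory_order_relaxed))
		puts("waker: target still polling, skipping the IPI");
	else
		puts("waker: sending reschedule IPI");
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, idle_side, NULL);
	pthread_create(&b, NULL, waker_side, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* The fences make it impossible for "entering deep idle" and
	 * "skipping the IPI" to both appear in one run; that combination
	 * would be exactly the lost wakeup the kernel comment warns about. */
	return 0;
}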