Merge branch 'rcu-tasks.2014.09.10a' into HEAD
rcu-tasks.2014.09.10a: Add RCU-tasks flavor of RCU.
commit 96b4672703
24 changed files with 613 additions and 93 deletions
@@ -55,6 +55,7 @@ enum rcutorture_type {
 	RCU_FLAVOR,
 	RCU_BH_FLAVOR,
 	RCU_SCHED_FLAVOR,
+	RCU_TASKS_FLAVOR,
 	SRCU_FLAVOR,
 	INVALID_RCU_FLAVOR
 };
@@ -197,6 +198,28 @@ void call_rcu_sched(struct rcu_head *head,
 void synchronize_sched(void);
 
+/**
+ * call_rcu_tasks() - Queue an RCU callback for invocation after a task-based grace period
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual callback function to be invoked after the grace period
+ *
+ * The callback function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_tasks() assumes
+ * that the read-side critical sections end at a voluntary context
+ * switch (not a preemption!), entry into idle, or transition to usermode
+ * execution. As such, there are no read-side primitives analogous to
+ * rcu_read_lock() and rcu_read_unlock() because this primitive is intended
+ * to determine that all tasks have passed through a safe state, not so
+ * much for data-structure synchronization.
+ *
+ * See the description of call_rcu() for more detailed information on
+ * memory ordering guarantees.
+ */
+void call_rcu_tasks(struct rcu_head *head, void (*func)(struct rcu_head *head));
+void synchronize_rcu_tasks(void);
+void rcu_barrier_tasks(void);
 
 #ifdef CONFIG_PREEMPT_RCU
 
 void __rcu_read_lock(void);
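The hunk above only declares the update-side API, so a short usage sketch may help. The following example is illustrative and not part of this commit: it assumes a hypothetical subsystem that must not free an old probe structure while some task might still be executing inside it. Only rcu_head, container_of(), kfree(), call_rcu_tasks(), and synchronize_rcu_tasks() are real kernel symbols here; struct old_probe, old_probe_free_cb(), and old_probe_retire() are made-up names.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical structure whose lifetime is governed by RCU-tasks. */
struct old_probe {
	void (*handler)(void);	/* code some task may still be executing */
	struct rcu_head rh;	/* links this structure onto the RCU-tasks callback list */
};

/*
 * Runs only after every task has passed through a voluntary context
 * switch, the idle loop, or usermode execution, so no task can still
 * be inside probe->handler when the structure is freed.
 */
static void old_probe_free_cb(struct rcu_head *rh)
{
	struct old_probe *probe = container_of(rh, struct old_probe, rh);

	kfree(probe);
}

static void old_probe_retire(struct old_probe *probe)
{
	/* Asynchronous: queue the callback and return immediately. */
	call_rcu_tasks(&probe->rh, old_probe_free_cb);

	/*
	 * Alternatively, a caller that can sleep could wait
	 * synchronously for a full RCU-tasks grace period:
	 *
	 *	synchronize_rcu_tasks();
	 *	kfree(probe);
	 */
}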
@@ -238,8 +261,8 @@ static inline int rcu_preempt_depth(void)
 
 /* Internal to kernel */
 void rcu_init(void);
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
+void rcu_sched_qs(void);
+void rcu_bh_qs(void);
 void rcu_check_callbacks(int cpu, int user);
 struct notifier_block;
 void rcu_idle_enter(void);
@@ -302,6 +325,36 @@ static inline void rcu_init_nohz(void)
 		rcu_irq_exit(); \
 	} while (0)
 
+/*
+ * Note a voluntary context switch for RCU-tasks benefit.  This is a
+ * macro rather than an inline function to avoid #include hell.
+ */
+#ifdef CONFIG_TASKS_RCU
+#define TASKS_RCU(x) x
+extern struct srcu_struct tasks_rcu_exit_srcu;
+#define rcu_note_voluntary_context_switch(t) \
+	do { \
+		if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
+			ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+	} while (0)
+#else /* #ifdef CONFIG_TASKS_RCU */
+#define TASKS_RCU(x) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) do { } while (0)
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
+/**
+ * cond_resched_rcu_qs - Report potential quiescent states to RCU
+ *
+ * This macro resembles cond_resched(), except that it is defined to
+ * report potential quiescent states to RCU-tasks even if the cond_resched()
+ * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ */
+#define cond_resched_rcu_qs() \
+do { \
+	rcu_note_voluntary_context_switch(current); \
+	cond_resched(); \
+} while (0)
+
 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP)
 bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
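To illustrate why cond_resched_rcu_qs() exists, here is a minimal sketch, not taken from this commit, of a long-running kernel loop that would otherwise stall RCU-tasks grace periods. The function scrub_table() and its table/nr_entries parameters are hypothetical; cond_resched_rcu_qs() and rcu_note_voluntary_context_switch() are the macros defined in the hunk above.

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Hypothetical long-running worker that walks a large table. */
static void scrub_table(unsigned long *table, unsigned long nr_entries)
{
	unsigned long i;

	for (i = 0; i < nr_entries; i++) {
		table[i] = 0;

		/*
		 * Periodically report a quiescent state.  Unlike a bare
		 * cond_resched(), this also clears ->rcu_tasks_holdout via
		 * rcu_note_voluntary_context_switch(current), so RCU-tasks
		 * grace periods keep making progress even if the
		 * cond_resched() machinery is shut off on PREEMPT kernels.
		 */
		if ((i & 1023) == 0)
			cond_resched_rcu_qs();
	}
}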