ANDROID: Fix massive cpufreq_times memory leaks
Every time _cpu_up() is called for a CPU, idle_thread_get() is called, which re-initializes that CPU's idle thread, a task struct that was created once and cached in a global variable in smpboot.c. idle_thread_get() calls init_idle(), which calls __sched_fork(), and __sched_fork() is where cpufreq_task_times_init() runs. cpufreq_task_times_init() allocates memory for the task struct's time_in_state array. Because idle_thread_get() reuses an existing task struct, every call to init_idle() makes cpufreq_task_times_init() allocate the array again, overwriting the allocation the idle thread already had. Memory is therefore leaked every time a CPU is onlined.

To fix this, move allocation of time_in_state into _do_fork() so it is never allocated for idle threads at all. The cpufreq times interface is intended for tracking userspace tasks, so removing it from the kernel's idle threads loses no functionality.

But that's not all! Task structs can be freed outside of release_task(), which creates another memory leak: a task struct can be freed without its cpufreq times allocation ever being freed. To fix this, free the cpufreq times allocation where task struct allocations are freed, in free_task(). Since free_task() can also be called in error paths of copy_process() after dup_task_struct(), set time_in_state to NULL immediately after dup_task_struct() to avoid a possible double free.

Bug description and fix adapted from a patch submitted by Sultan Alsawaf <sultanxda@gmail.com> at https://android-review.googlesource.com/c/kernel/msm/+/700134

Bug: 110044919
Test: Hikey960 builds, boots & reports /proc/<pid>/time_in_state correctly
Change-Id: I12fe7611fc88eb7f6c39f8f7629ad27b6ec4722c
Signed-off-by: Connor O'Brien <connoro@google.com>
parent 23a1412b82
commit 47bbcd6bf8
5 changed files with 15 additions and 8 deletions
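The diff below splits cpufreq_task_times_init() into a cheap init (clear the pointer) and a separate allocation done from the fork path, and moves the free into free_task(). As a rough illustration of why that fixes the leak and avoids a double free, here is a minimal userspace C sketch; struct task, MAX_STATE, and the fork/free scaffolding are invented for this example, and only the init/alloc/exit split loosely mirrors the shape of the patch.

/*
 * Illustrative userspace sketch only -- not kernel code. struct task,
 * MAX_STATE and the helpers below are invented for this example; only the
 * init/alloc/free split mirrors the shape of the actual patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_STATE 16

struct task {
	unsigned long long *time_in_state;
	unsigned int max_state;
};

/* Old shape: "init" also allocated. Re-running it on a cached task struct
 * (as CPU online does via init_idle() -> __sched_fork()) overwrites the
 * previous pointer and leaks the earlier allocation. */
static void task_times_init_old(struct task *p)
{
	p->time_in_state = calloc(MAX_STATE, sizeof(p->time_in_state[0]));
	p->max_state = MAX_STATE;
}

/* New shape: init only clears the fields, so it is safe to run repeatedly
 * and safe to run on a freshly duplicated task that still holds the
 * parent's pointer. */
static void task_times_init(struct task *p)
{
	p->time_in_state = NULL;
	p->max_state = 0;
}

/* Allocation happens once, from the fork path, so idle threads (which are
 * not created through that path) never get an array at all. */
static void task_times_alloc(struct task *p)
{
	p->time_in_state = calloc(MAX_STATE, sizeof(p->time_in_state[0]));
	if (p->time_in_state)
		p->max_state = MAX_STATE;
}

/* Freeing lives next to the task-struct free, so any path that frees a
 * task (including fork error paths) also frees the array. */
static void free_task(struct task *p)
{
	free(p->time_in_state);
	free(p);
}

int main(void)
{
	/* The leak: old init run twice on the same cached task struct. */
	struct task idle = { NULL, 0 };
	task_times_init_old(&idle);
	task_times_init_old(&idle);	/* first array is now unreachable */
	free(idle.time_in_state);	/* only the second array can be freed */

	/* The fixed lifecycle: dup copies the pointer, init clears it before
	 * any error path could free it, alloc runs once, free runs once. */
	struct task *parent = calloc(1, sizeof(*parent));
	task_times_alloc(parent);

	struct task *child = malloc(sizeof(*child));
	memcpy(child, parent, sizeof(*child));	/* stand-in for dup_task_struct() */
	task_times_init(child);			/* avoids double-freeing parent's array */
	task_times_alloc(child);

	free_task(child);
	free_task(parent);
	puts("done");
	return 0;
}

Running the sketch under a leak checker such as valgrind (assuming one is available) shows the old pattern losing one array per extra init call, while the fixed lifecycle frees everything exactly once.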
drivers/cpufreq/cpufreq_times.c
@@ -234,16 +234,19 @@ static int uid_time_in_state_seq_show(struct seq_file *m, void *v)
 
 void cpufreq_task_times_init(struct task_struct *p)
 {
-	void *temp;
 	unsigned long flags;
-	unsigned int max_state;
 
 	spin_lock_irqsave(&task_time_in_state_lock, flags);
 	p->time_in_state = NULL;
 	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
 	p->max_state = 0;
+}
 
-	max_state = READ_ONCE(next_offset);
+void cpufreq_task_times_alloc(struct task_struct *p)
+{
+	void *temp;
+	unsigned long flags;
+	unsigned int max_state = READ_ONCE(next_offset);
 
 	/* We use one array to avoid multiple allocs per task */
 	temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
include/linux/cpufreq_times.h
@@ -22,6 +22,7 @@
 
 #ifdef CONFIG_CPU_FREQ_TIMES
 void cpufreq_task_times_init(struct task_struct *p);
+void cpufreq_task_times_alloc(struct task_struct *p);
 void cpufreq_task_times_exit(struct task_struct *p);
 int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
 	struct pid *pid, struct task_struct *p);
@@ -32,6 +33,7 @@ void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end);
 int single_uid_time_in_state_open(struct inode *inode, struct file *file);
 #else
 static inline void cpufreq_task_times_init(struct task_struct *p) {}
+static inline void cpufreq_task_times_alloc(struct task_struct *p) {}
 static inline void cpufreq_task_times_exit(struct task_struct *p) {}
 static inline void cpufreq_acct_update_power(struct task_struct *p,
 	u64 cputime) {}
kernel/exit.c
@@ -54,7 +54,6 @@
 #include <linux/writeback.h>
 #include <linux/shm.h>
 #include <linux/kcov.h>
-#include <linux/cpufreq_times.h>
 
 #include "sched/tune.h"
 
@@ -172,8 +171,6 @@ void release_task(struct task_struct *p)
 {
 	struct task_struct *leader;
 	int zap_leader;
-
-	cpufreq_task_times_exit(p);
 repeat:
 	/* don't need to get the RCU readlock here - the process is dead and
 	 * can't be modifying its own credentials. But shut RCU-lockdep up */
kernel/fork.c
@@ -77,6 +77,7 @@
 #include <linux/compiler.h>
 #include <linux/sysctl.h>
 #include <linux/kcov.h>
+#include <linux/cpufreq_times.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -339,6 +340,8 @@ void put_task_stack(struct task_struct *tsk)
 
 void free_task(struct task_struct *tsk)
 {
+	cpufreq_task_times_exit(tsk);
+
 #ifndef CONFIG_THREAD_INFO_IN_TASK
 	/*
 	 * The task is finally done with both the stack and thread_info,
@@ -1527,6 +1530,8 @@ static __latent_entropy struct task_struct *copy_process(
 	if (!p)
 		goto fork_out;
 
+	cpufreq_task_times_init(p);
+
 	ftrace_graph_init_task(p);
 
 	rt_mutex_init_task(p);
@@ -1963,6 +1968,8 @@ long _do_fork(unsigned long clone_flags,
 	struct completion vfork;
 	struct pid *pid;
 
+	cpufreq_task_times_alloc(p);
+
 	trace_sched_process_fork(current, p);
 
 	pid = get_task_pid(p, PIDTYPE_PID);
kernel/sched/core.c
@@ -2261,8 +2261,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
-	cpufreq_task_times_init(p);
-
 	RB_CLEAR_NODE(&p->dl.rb_node);
 	init_dl_task_timer(&p->dl);
 	__dl_clear_params(p);