exynos-linux-stable/include/linux/cpu.h
Greg Kroah-Hartman f85543ba3c This is the 4.9.120 stable release
-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlt0UY8ACgkQONu9yGCS
 aT7S6hAAlsHMdJgnhCQ06Ec1ld1b3Q7tehn+xVFgWAPyHm/L3koM/78IMjo7ZYCV
 IxiIQyQ+JCn9DXQb2nqeEFgo8DYo83KvBkIgI+GQZHuVp+Dp9AvQLV8Bm3CQwiCY
 LPlP1sWs0xRZxovazMJ5MLsAOf9hZVkpwWoQHrEQMlUHpKYwf6GRBteZ6C3evZ4G
 lPo5N986h2YDYA3FLA2h5EeFS2H39bgbnepJ6/4/cYBpDy443X3TrV6UZjDDhHST
 6XYPuqoApz+QIk2x2FfhVbZUb8WtPJNg/6IunOhlaUH/WCEN05lQ2x0jXAA4jOV/
 Z2QyGnqsD8hMleDeakzo+yggaECkK2n+b6SicmomXWj7ILmBCrAIG0dOtPksmAaw
 JP9mOKz5b87N2GOShSvj9LXuFOIO7TVvwFZCo4oYxkaW6ROxSO7Ffkiv8I6imMn5
 zPGSBG4Pr9eQfeO+IK2JAxrULICcFbh57XXEP5x7MH78yRw4hG++BtWg62pI7TQl
 l3zZ/eY8wKjTlNQbFkSAPenMypPic6w5NRA9tHST5XrwZkF0nzMWDz/1mAgOH5jx
 XVTK3kZabKAf3YQ2/2nAnUvDM4BsM1SwUxTfc1CNQHIl24G7Y3Z4Vxlfj5orNEQ+
 Y5OPKDatNi8gWBecDLNITih7h+WlRn1UKR/v4f8TPV0gyGhn2Vc=
 =peDG
 -----END PGP SIGNATURE-----

Merge 4.9.120 into android-4.9

Changes in 4.9.120
	ext4: fix check to prevent initializing reserved inodes
	tpm: fix race condition in tpm_common_write()
	parisc: Enable CONFIG_MLONGCALLS by default
	parisc: Define mb() and add memory barriers to assembler unlock sequences
	kasan: add no_sanitize attribute for clang builds
	Mark HI and TASKLET softirq synchronous
	xen/netfront: don't cache skb_shinfo()
	ACPI / LPSS: Add missing prv_offset setting for byt/cht PWM devices
	scsi: sr: Avoid that opening a CD-ROM hangs with runtime power management enabled
	init: rename and re-order boot_cpu_state_init()
	root dentries need RCU-delayed freeing
	make sure that __dentry_kill() always invalidates d_seq, unhashed or not
	fix mntput/mntput race
	fix __legitimize_mnt()/mntput() race
	proc/sysctl: prune stale dentries during unregistering
	proc/sysctl: Don't grab i_lock under sysctl_lock.
	proc: Fix proc_sys_prune_dcache to hold a sb reference
	IB/core: Make testing MR flags for writability a static inline function
	IB/mlx4: Mark user MR as writable if actual virtual memory is writable
	mtd: nand: qcom: Add a NULL check for devm_kasprintf()
	IB/ocrdma: fix out of bounds access to local buffer
	ARM: dts: imx6sx: fix irq for pcie bridge
	x86/paravirt: Fix spectre-v2 mitigations for paravirt guests
	x86/speculation: Protect against userspace-userspace spectreRSB
	kprobes/x86: Fix %p uses in error messages
	x86/irqflags: Provide a declaration for native_save_fl
	x86/speculation/l1tf: Increase 32bit PAE __PHYSICAL_PAGE_SHIFT
	mm: x86: move _PAGE_SWP_SOFT_DIRTY from bit 7 to bit 1
	x86/speculation/l1tf: Change order of offset/type in swap entry
	x86/speculation/l1tf: Protect swap entries against L1TF
	x86/speculation/l1tf: Protect PROT_NONE PTEs against speculation
	x86/speculation/l1tf: Make sure the first page is always reserved
	x86/speculation/l1tf: Add sysfs reporting for l1tf
	x86/speculation/l1tf: Disallow non privileged high MMIO PROT_NONE mappings
	x86/speculation/l1tf: Limit swap file size to MAX_PA/2
	x86/bugs: Move the l1tf function and define pr_fmt properly
	x86/smp: Provide topology_is_primary_thread()
	x86/topology: Provide topology_smt_supported()
	cpu/hotplug: Make bringup/teardown of smp threads symmetric
	cpu/hotplug: Split do_cpu_down()
	cpu/hotplug: Provide knobs to control SMT
	x86/cpu: Remove the pointless CPU printout
	x86/cpu/AMD: Remove the pointless detect_ht() call
	x86/cpu/common: Provide detect_ht_early()
	x86/cpu/topology: Provide detect_extended_topology_early()
	x86/cpu/intel: Evaluate smp_num_siblings early
	x86/CPU/AMD: Do not check CPUID max ext level before parsing SMP info
	x86/cpu/AMD: Evaluate smp_num_siblings early
	x86/apic: Ignore secondary threads if nosmt=force
	x86/speculation/l1tf: Extend 64bit swap file size limit
	x86/cpufeatures: Add detection of L1D cache flush support.
	x86/CPU/AMD: Move TOPOEXT reenablement before reading smp_num_siblings
	x86/speculation/l1tf: Protect PAE swap entries against L1TF
	x86/speculation/l1tf: Fix up pte->pfn conversion for PAE
	Revert "x86/apic: Ignore secondary threads if nosmt=force"
	cpu/hotplug: Boot HT siblings at least once
	x86/KVM: Warn user if KVM is loaded SMT and L1TF CPU bug being present
	x86/KVM/VMX: Add module argument for L1TF mitigation
	x86/KVM/VMX: Add L1D flush algorithm
	x86/KVM/VMX: Add L1D MSR based flush
	x86/KVM/VMX: Add L1D flush logic
	kvm: nVMX: Update MSR load counts on a VMCS switch
	x86/KVM/VMX: Split the VMX MSR LOAD structures to have an host/guest numbers
	x86/KVM/VMX: Add find_msr() helper function
	x86/KVM/VMX: Separate the VMX AUTOLOAD guest/host number accounting
	x86/KVM/VMX: Extend add_atomic_switch_msr() to allow VMENTER only MSRs
	x86/KVM/VMX: Use MSR save list for IA32_FLUSH_CMD if required
	cpu/hotplug: Online siblings when SMT control is turned on
	x86/litf: Introduce vmx status variable
	x86/kvm: Drop L1TF MSR list approach
	x86/l1tf: Handle EPT disabled state proper
	x86/kvm: Move l1tf setup function
	x86/kvm: Add static key for flush always
	x86/kvm: Serialize L1D flush parameter setter
	x86/kvm: Allow runtime control of L1D flush
	cpu/hotplug: Expose SMT control init function
	cpu/hotplug: Set CPU_SMT_NOT_SUPPORTED early
	x86/bugs, kvm: Introduce boot-time control of L1TF mitigations
	Documentation: Add section about CPU vulnerabilities
	x86/KVM/VMX: Initialize the vmx_l1d_flush_pages' content
	Documentation/l1tf: Fix typos
	cpu/hotplug: detect SMT disabled by BIOS
	x86/KVM/VMX: Don't set l1tf_flush_l1d to true from vmx_l1d_flush()
	x86/KVM/VMX: Replace 'vmx_l1d_flush_always' with 'vmx_l1d_flush_cond'
	x86/KVM/VMX: Move the l1tf_flush_l1d test to vmx_l1d_flush()
	x86/irq: Demote irq_cpustat_t::__softirq_pending to u16
	x86/KVM/VMX: Introduce per-host-cpu analogue of l1tf_flush_l1d
	x86: Don't include linux/irq.h from asm/hardirq.h
	x86/irq: Let interrupt handlers set kvm_cpu_l1tf_flush_l1d
	x86/KVM/VMX: Don't set l1tf_flush_l1d from vmx_handle_external_intr()
	Documentation/l1tf: Remove Yonah processors from not vulnerable list
	KVM: x86: Add a framework for supporting MSR-based features
	KVM: SVM: Add MSR-based feature support for serializing LFENCE
	KVM: X86: Introduce kvm_get_msr_feature()
	KVM: X86: Allow userspace to define the microcode version
	KVM: VMX: support MSR_IA32_ARCH_CAPABILITIES as a feature MSR
	x86/speculation: Simplify sysfs report of VMX L1TF vulnerability
	x86/speculation: Use ARCH_CAPABILITIES to skip L1D flush on vmentry
	KVM: VMX: Tell the nested hypervisor to skip L1D flush on vmentry
	cpu/hotplug: Fix SMT supported evaluation
	x86/speculation/l1tf: Invert all not present mappings
	x86/speculation/l1tf: Make pmd/pud_mknotpresent() invert
	x86/mm/pat: Make set_memory_np() L1TF safe
	x86/mm/kmmio: Make the tracer robust against L1TF
	tools headers: Synchronise x86 cpufeatures.h for L1TF additions
	x86/microcode: Do not upload microcode if CPUs are offline
	x86/microcode: Allow late microcode loading with SMT disabled
	x86/smp: fix non-SMP broken build due to redefinition of apic_id_is_primary_thread
	cpu/hotplug: Non-SMP machines do not make use of booted_once
	x86/init: fix build with CONFIG_SWAP=n
	x86/speculation/l1tf: Unbreak !__HAVE_ARCH_PFN_MODIFY_ALLOWED architectures
	x86/cpu/amd: Limit cpu_core_id fixup to families older than F17h
	x86/CPU/AMD: Have smp_num_siblings and cpu_llc_id always be present
	Linux 4.9.120

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
2018-08-15 18:30:15 +02:00


/*
 * include/linux/cpu.h - generic cpu definition
 *
 * This is mainly for topological representation. We define the
 * basic 'struct cpu' here, which can be embedded in per-arch
 * definitions of processors.
 *
 * Basic handling of the devices is done in drivers/base/cpu.c
 *
 * CPUs are exported via sysfs in the devices/system/cpu
 * directory.
 */
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_

#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/cpuhotplug.h>

struct device;
struct device_node;
struct attribute_group;

struct cpu {
	int node_id;		/* The node which contains the CPU */
	int hotpluggable;	/* creates sysfs control file if hotpluggable */
	struct device dev;
};
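
/*
 * Illustrative sketch (not part of this header): an architecture
 * typically keeps one 'struct cpu' per processor and registers each
 * one at boot via register_cpu() below, which creates the
 * /sys/devices/system/cpu/cpuN nodes. The per-CPU variable and the
 * init function names here are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct cpu, cpu_devices);
 *
 *	static int __init example_topology_init(void)
 *	{
 *		int i;
 *
 *		for_each_possible_cpu(i) {
 *			struct cpu *c = &per_cpu(cpu_devices, i);
 *
 *			c->hotpluggable = 1;
 *			if (register_cpu(c, i))
 *				pr_warn("cpu%d registration failed\n", i);
 *		}
 *		return 0;
 *	}
 *	subsys_initcall(example_topology_init);
 */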
extern void boot_cpu_init(void);
extern void boot_cpu_hotplug_init(void);
extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
					      int cpu, unsigned int *thread);
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
extern ssize_t cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
			     struct device_attribute *attr, char *buf);

extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
				 const struct attribute_group **groups,
				 const char *fmt, ...);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */
#define CPU_BROKEN		0x000B /* CPU (unsigned)v did not die properly,
					* perhaps due to preemption. */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
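
/*
 * Illustrative sketch: notifier callbacks usually mask off
 * CPU_TASKS_FROZEN so that e.g. CPU_ONLINE and CPU_ONLINE_FROZEN take
 * the same path. The callback name below is hypothetical.
 *
 *	static int example_cpu_callback(struct notifier_block *nb,
 *					unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			pr_info("cpu%u came online\n", cpu);
 *			break;
 *		case CPU_DEAD:
 *			pr_info("cpu%u went down\n", cpu);
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 */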
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
#define cpu_notifier(fn, pri) {					\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_cpu_notifier(&fn##_nb);			\
}

#define __cpu_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	__register_cpu_notifier(&fn##_nb);			\
}
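
/*
 * Illustrative use of the macro above (callback name hypothetical):
 *
 *	cpu_notifier(example_cpu_callback, 0);
 *
 * This statically allocates the notifier_block and registers it, so it
 * is meant to be invoked once from init code.
 */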
extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
extern void __unregister_cpu_notifier(struct notifier_block *nb);
#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}
static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}
static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}
#endif
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
#define cpu_notifier_register_begin cpu_maps_update_begin
#define cpu_notifier_register_done cpu_maps_update_done
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
#define cpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)
static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}
static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}
static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}
static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}
static inline void cpu_maps_update_begin(void)
{
}
static inline void cpu_maps_update_done(void)
{
}
static inline void cpu_notifier_register_begin(void)
{
}
static inline void cpu_notifier_register_done(void)
{
}
#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
#define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri)	__cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
#define __register_hotcpu_notifier(nb)	__register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
#define __unregister_hotcpu_notifier(nb)	__unregister_cpu_notifier(nb)
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
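
/*
 * Illustrative sketch: a reader that needs a stable view of the online
 * CPUs brackets its walk with get_online_cpus()/put_online_cpus(),
 * which holds off hotplug operations for the duration. do_something()
 * is hypothetical per-CPU work.
 *
 *	int cpu;
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	put_online_cpus();
 */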
#else /* CONFIG_HOTPLUG_CPU */
static inline void cpu_hotplug_begin(void) {}
static inline void cpu_hotplug_done(void) {}
#define get_online_cpus() do { } while (0)
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
#define __register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
#define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
#define __unregister_hotcpu_notifier(nb) ({ (void)(nb); })
#endif /* CONFIG_HOTPLUG_CPU */
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
static inline int disable_nonboot_cpus(void)
{
	return freeze_secondary_cpus(0);
}
extern void enable_nonboot_cpus(void);
#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
#endif /* !CONFIG_PM_SLEEP_SMP */
void cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable);
/* Attach to any functions which should be considered cpuidle. */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))
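
/*
 * Illustrative sketch: marking an idle helper __cpuidle places it in
 * the .cpuidle.text section so cpu_in_idle() can recognise its PC
 * range. The function below is hypothetical.
 *
 *	static void __cpuidle example_idle_wait(void)
 *	{
 *		while (!need_resched())
 *			cpu_relax();
 *	}
 */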
bool cpu_in_idle(unsigned long pc);
void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
void cpuhp_report_idle_dead(void);
#else
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
enum cpuhp_smt_control {
	CPU_SMT_ENABLED,
	CPU_SMT_DISABLED,
	CPU_SMT_FORCE_DISABLED,
	CPU_SMT_NOT_SUPPORTED,
};
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
extern void cpu_smt_check_topology_early(void);
extern void cpu_smt_check_topology(void);
#else
# define cpu_smt_control (CPU_SMT_ENABLED)
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology_early(void) { }
static inline void cpu_smt_check_topology(void) { }
#endif
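
/*
 * Illustrative sketch: code whose behaviour depends on whether SMT is
 * active (e.g. L1TF mitigation decisions, as in this release's KVM
 * changes) can inspect cpu_smt_control. This works in both branches
 * above, since the !CONFIG_HOTPLUG_SMT case defines it as a constant.
 *
 *	if (cpu_smt_control == CPU_SMT_ENABLED)
 *		pr_warn("SMT enabled: stricter mitigation may be needed\n");
 */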
#define IDLE_START 1
#define IDLE_END 2
void idle_notifier_register(struct notifier_block *n);
void idle_notifier_unregister(struct notifier_block *n);
void idle_notifier_call_chain(unsigned long val);
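
/*
 * Illustrative sketch: these idle notifiers (an addition carried in
 * this Android-based tree rather than mainline's linux/cpu.h) let a
 * driver react to idle entry/exit. The block and callback below are
 * hypothetical.
 *
 *	static int example_idle_notify(struct notifier_block *nb,
 *				       unsigned long val, void *data)
 *	{
 *		if (val == IDLE_START)
 *			;	// quiesce hypothetical device activity
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block example_idle_nb = {
 *		.notifier_call = example_idle_notify,
 *	};
 *
 *	idle_notifier_register(&example_idle_nb);
 */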
#endif /* _LINUX_CPU_H_ */