This is the 4.9.120 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlt0UY8ACgkQONu9yGCS
aT7S6hAAlsHMdJgnhCQ06Ec1ld1b3Q7tehn+xVFgWAPyHm/L3koM/78IMjo7ZYCV
IxiIQyQ+JCn9DXQb2nqeEFgo8DYo83KvBkIgI+GQZHuVp+Dp9AvQLV8Bm3CQwiCY
LPlP1sWs0xRZxovazMJ5MLsAOf9hZVkpwWoQHrEQMlUHpKYwf6GRBteZ6C3evZ4G
lPo5N986h2YDYA3FLA2h5EeFS2H39bgbnepJ6/4/cYBpDy443X3TrV6UZjDDhHST
6XYPuqoApz+QIk2x2FfhVbZUb8WtPJNg/6IunOhlaUH/WCEN05lQ2x0jXAA4jOV/
Z2QyGnqsD8hMleDeakzo+yggaECkK2n+b6SicmomXWj7ILmBCrAIG0dOtPksmAaw
JP9mOKz5b87N2GOShSvj9LXuFOIO7TVvwFZCo4oYxkaW6ROxSO7Ffkiv8I6imMn5
zPGSBG4Pr9eQfeO+IK2JAxrULICcFbh57XXEP5x7MH78yRw4hG++BtWg62pI7TQl
l3zZ/eY8wKjTlNQbFkSAPenMypPic6w5NRA9tHST5XrwZkF0nzMWDz/1mAgOH5jx
XVTK3kZabKAf3YQ2/2nAnUvDM4BsM1SwUxTfc1CNQHIl24G7Y3Z4Vxlfj5orNEQ+
Y5OPKDatNi8gWBecDLNITih7h+WlRn1UKR/v4f8TPV0gyGhn2Vc=
=peDG
-----END PGP SIGNATURE-----

Merge 4.9.120 into android-4.9

Changes in 4.9.120
    ext4: fix check to prevent initializing reserved inodes
    tpm: fix race condition in tpm_common_write()
    parisc: Enable CONFIG_MLONGCALLS by default
    parisc: Define mb() and add memory barriers to assembler unlock sequences
    kasan: add no_sanitize attribute for clang builds
    Mark HI and TASKLET softirq synchronous
    xen/netfront: don't cache skb_shinfo()
    ACPI / LPSS: Add missing prv_offset setting for byt/cht PWM devices
    scsi: sr: Avoid that opening a CD-ROM hangs with runtime power management enabled
    init: rename and re-order boot_cpu_state_init()
    root dentries need RCU-delayed freeing
    make sure that __dentry_kill() always invalidates d_seq, unhashed or not
    fix mntput/mntput race
    fix __legitimize_mnt()/mntput() race
    proc/sysctl: prune stale dentries during unregistering
    proc/sysctl: Don't grab i_lock under sysctl_lock.
    proc: Fix proc_sys_prune_dcache to hold a sb reference
    IB/core: Make testing MR flags for writability a static inline function
    IB/mlx4: Mark user MR as writable if actual virtual memory is writable
    mtd: nand: qcom: Add a NULL check for devm_kasprintf()
    IB/ocrdma: fix out of bounds access to local buffer
    ARM: dts: imx6sx: fix irq for pcie bridge
    x86/paravirt: Fix spectre-v2 mitigations for paravirt guests
    x86/speculation: Protect against userspace-userspace spectreRSB
    kprobes/x86: Fix %p uses in error messages
    x86/irqflags: Provide a declaration for native_save_fl
    x86/speculation/l1tf: Increase 32bit PAE __PHYSICAL_PAGE_SHIFT
    mm: x86: move _PAGE_SWP_SOFT_DIRTY from bit 7 to bit 1
    x86/speculation/l1tf: Change order of offset/type in swap entry
    x86/speculation/l1tf: Protect swap entries against L1TF
    x86/speculation/l1tf: Protect PROT_NONE PTEs against speculation
    x86/speculation/l1tf: Make sure the first page is always reserved
    x86/speculation/l1tf: Add sysfs reporting for l1tf
    x86/speculation/l1tf: Disallow non privileged high MMIO PROT_NONE mappings
    x86/speculation/l1tf: Limit swap file size to MAX_PA/2
    x86/bugs: Move the l1tf function and define pr_fmt properly
    x86/smp: Provide topology_is_primary_thread()
    x86/topology: Provide topology_smt_supported()
    cpu/hotplug: Make bringup/teardown of smp threads symmetric
    cpu/hotplug: Split do_cpu_down()
    cpu/hotplug: Provide knobs to control SMT
    x86/cpu: Remove the pointless CPU printout
    x86/cpu/AMD: Remove the pointless detect_ht() call
    x86/cpu/common: Provide detect_ht_early()
    x86/cpu/topology: Provide detect_extended_topology_early()
    x86/cpu/intel: Evaluate smp_num_siblings early
    x86/CPU/AMD: Do not check CPUID max ext level before parsing SMP info
    x86/cpu/AMD: Evaluate smp_num_siblings early
    x86/apic: Ignore secondary threads if nosmt=force
    x86/speculation/l1tf: Extend 64bit swap file size limit
    x86/cpufeatures: Add detection of L1D cache flush support.
    x86/CPU/AMD: Move TOPOEXT reenablement before reading smp_num_siblings
    x86/speculation/l1tf: Protect PAE swap entries against L1TF
    x86/speculation/l1tf: Fix up pte->pfn conversion for PAE
    Revert "x86/apic: Ignore secondary threads if nosmt=force"
    cpu/hotplug: Boot HT siblings at least once
    x86/KVM: Warn user if KVM is loaded SMT and L1TF CPU bug being present
    x86/KVM/VMX: Add module argument for L1TF mitigation
    x86/KVM/VMX: Add L1D flush algorithm
    x86/KVM/VMX: Add L1D MSR based flush
    x86/KVM/VMX: Add L1D flush logic
    kvm: nVMX: Update MSR load counts on a VMCS switch
    x86/KVM/VMX: Split the VMX MSR LOAD structures to have an host/guest numbers
    x86/KVM/VMX: Add find_msr() helper function
    x86/KVM/VMX: Separate the VMX AUTOLOAD guest/host number accounting
    x86/KVM/VMX: Extend add_atomic_switch_msr() to allow VMENTER only MSRs
    x86/KVM/VMX: Use MSR save list for IA32_FLUSH_CMD if required
    cpu/hotplug: Online siblings when SMT control is turned on
    x86/litf: Introduce vmx status variable
    x86/kvm: Drop L1TF MSR list approach
    x86/l1tf: Handle EPT disabled state proper
    x86/kvm: Move l1tf setup function
    x86/kvm: Add static key for flush always
    x86/kvm: Serialize L1D flush parameter setter
    x86/kvm: Allow runtime control of L1D flush
    cpu/hotplug: Expose SMT control init function
    cpu/hotplug: Set CPU_SMT_NOT_SUPPORTED early
    x86/bugs, kvm: Introduce boot-time control of L1TF mitigations
    Documentation: Add section about CPU vulnerabilities
    x86/KVM/VMX: Initialize the vmx_l1d_flush_pages' content
    Documentation/l1tf: Fix typos
    cpu/hotplug: detect SMT disabled by BIOS
    x86/KVM/VMX: Don't set l1tf_flush_l1d to true from vmx_l1d_flush()
    x86/KVM/VMX: Replace 'vmx_l1d_flush_always' with 'vmx_l1d_flush_cond'
    x86/KVM/VMX: Move the l1tf_flush_l1d test to vmx_l1d_flush()
    x86/irq: Demote irq_cpustat_t::__softirq_pending to u16
    x86/KVM/VMX: Introduce per-host-cpu analogue of l1tf_flush_l1d
    x86: Don't include linux/irq.h from asm/hardirq.h
    x86/irq: Let interrupt handlers set kvm_cpu_l1tf_flush_l1d
    x86/KVM/VMX: Don't set l1tf_flush_l1d from vmx_handle_external_intr()
    Documentation/l1tf: Remove Yonah processors from not vulnerable list
    KVM: x86: Add a framework for supporting MSR-based features
    KVM: SVM: Add MSR-based feature support for serializing LFENCE
    KVM: X86: Introduce kvm_get_msr_feature()
    KVM: X86: Allow userspace to define the microcode version
    KVM: VMX: support MSR_IA32_ARCH_CAPABILITIES as a feature MSR
    x86/speculation: Simplify sysfs report of VMX L1TF vulnerability
    x86/speculation: Use ARCH_CAPABILITIES to skip L1D flush on vmentry
    KVM: VMX: Tell the nested hypervisor to skip L1D flush on vmentry
    cpu/hotplug: Fix SMT supported evaluation
    x86/speculation/l1tf: Invert all not present mappings
    x86/speculation/l1tf: Make pmd/pud_mknotpresent() invert
    x86/mm/pat: Make set_memory_np() L1TF safe
    x86/mm/kmmio: Make the tracer robust against L1TF
    tools headers: Synchronise x86 cpufeatures.h for L1TF additions
    x86/microcode: Do not upload microcode if CPUs are offline
    x86/microcode: Allow late microcode loading with SMT disabled
    x86/smp: fix non-SMP broken build due to redefinition of apic_id_is_primary_thread
    cpu/hotplug: Non-SMP machines do not make use of booted_once
    x86/init: fix build with CONFIG_SWAP=n
    x86/speculation/l1tf: Unbreak !__HAVE_ARCH_PFN_MODIFY_ALLOWED architectures
    x86/cpu/amd: Limit cpu_core_id fixup to families older than F17h
    x86/CPU/AMD: Have smp_num_siblings and cpu_llc_id always be present
    Linux 4.9.120

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit f85543ba3c
106 changed files with 2714 additions and 393 deletions
|
@ -356,6 +356,7 @@ What: /sys/devices/system/cpu/vulnerabilities
|
|||
/sys/devices/system/cpu/vulnerabilities/spectre_v1
|
||||
/sys/devices/system/cpu/vulnerabilities/spectre_v2
|
||||
/sys/devices/system/cpu/vulnerabilities/spec_store_bypass
|
||||
/sys/devices/system/cpu/vulnerabilities/l1tf
|
||||
Date: January 2018
|
||||
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
|
||||
Description: Information about CPU vulnerabilities
|
||||
|
@ -367,3 +368,26 @@ Description: Information about CPU vulnerabilities
|
|||
"Not affected" CPU is not affected by the vulnerability
|
||||
"Vulnerable" CPU is affected and no mitigation in effect
|
||||
"Mitigation: $M" CPU is affected and mitigation $M is in effect
|
||||
|
||||
Details about the l1tf file can be found in
|
||||
Documentation/admin-guide/l1tf.rst
|
||||
|
||||
What: /sys/devices/system/cpu/smt
|
||||
/sys/devices/system/cpu/smt/active
|
||||
/sys/devices/system/cpu/smt/control
|
||||
Date: June 2018
|
||||
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
|
||||
Description: Control Symmetric Multi Threading (SMT)
|
||||
|
||||
active: Tells whether SMT is active (enabled and siblings online)
|
||||
|
||||
control: Read/write interface to control SMT. Possible
|
||||
values:
|
||||
|
||||
"on" SMT is enabled
|
||||
"off" SMT is disabled
|
||||
"forceoff" SMT is force disabled. Cannot be changed.
|
||||
"notsupported" SMT is not supported by the CPU
|
||||
|
||||
If control status is "forceoff" or "notsupported" writes
|
||||
are rejected.
|
||||
|
|
|
@ -12,6 +12,7 @@ Contents:
|
|||
:maxdepth: 2
|
||||
|
||||
kernel-documentation
|
||||
l1tf
|
||||
development-process/index
|
||||
dev-tools/tools
|
||||
driver-api/index
|
||||
|
|
|
@ -2022,10 +2022,84 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
|
|||
(virtualized real and unpaged mode) on capable
|
||||
Intel chips. Default is 1 (enabled)
|
||||
|
||||
kvm-intel.vmentry_l1d_flush=[KVM,Intel] Mitigation for L1 Terminal Fault
|
||||
CVE-2018-3620.
|
||||
|
||||
Valid arguments: never, cond, always
|
||||
|
||||
always: L1D cache flush on every VMENTER.
|
||||
cond: Flush L1D on VMENTER only when the code between
|
||||
VMEXIT and VMENTER can leak host memory.
|
||||
never: Disables the mitigation
|
||||
|
||||
Default is cond (do L1 cache flush in specific instances)
|
||||
|
||||
kvm-intel.vpid= [KVM,Intel] Disable Virtual Processor Identification
|
||||
feature (tagged TLBs) on capable Intel chips.
|
||||
Default is 1 (enabled)
|
||||
|
||||
l1tf= [X86] Control mitigation of the L1TF vulnerability on
|
||||
affected CPUs
|
||||
|
||||
The kernel PTE inversion protection is unconditionally
|
||||
enabled and cannot be disabled.
|
||||
|
||||
full
|
||||
Provides all available mitigations for the
|
||||
L1TF vulnerability. Disables SMT and
|
||||
enables all mitigations in the
|
||||
hypervisors, i.e. unconditional L1D flush.
|
||||
|
||||
SMT control and L1D flush control via the
|
||||
sysfs interface is still possible after
|
||||
boot. Hypervisors will issue a warning
|
||||
when the first VM is started in a
|
||||
potentially insecure configuration,
|
||||
i.e. SMT enabled or L1D flush disabled.
|
||||
|
||||
full,force
|
||||
Same as 'full', but disables SMT and L1D
|
||||
flush runtime control. Implies the
|
||||
'nosmt=force' command line option.
|
||||
(i.e. sysfs control of SMT is disabled.)
|
||||
|
||||
flush
|
||||
Leaves SMT enabled and enables the default
|
||||
hypervisor mitigation, i.e. conditional
|
||||
L1D flush.
|
||||
|
||||
SMT control and L1D flush control via the
|
||||
sysfs interface is still possible after
|
||||
boot. Hypervisors will issue a warning
|
||||
when the first VM is started in a
|
||||
potentially insecure configuration,
|
||||
i.e. SMT enabled or L1D flush disabled.
|
||||
|
||||
flush,nosmt
|
||||
|
||||
Disables SMT and enables the default
|
||||
hypervisor mitigation.
|
||||
|
||||
SMT control and L1D flush control via the
|
||||
sysfs interface is still possible after
|
||||
boot. Hypervisors will issue a warning
|
||||
when the first VM is started in a
|
||||
potentially insecure configuration,
|
||||
i.e. SMT enabled or L1D flush disabled.
|
||||
|
||||
flush,nowarn
|
||||
Same as 'flush', but hypervisors will not
|
||||
warn when a VM is started in a potentially
|
||||
insecure configuration.
|
||||
|
||||
off
|
||||
Disables hypervisor mitigations and doesn't
|
||||
emit any warnings.
|
||||
|
||||
Default is 'flush'.
|
||||
|
||||
For details see: Documentation/admin-guide/l1tf.rst
|
||||
|
||||
l2cr= [PPC]
|
||||
|
||||
l3cr= [PPC]
|
||||
|
@ -2706,6 +2780,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
|
|||
nosmt [KNL,S390] Disable symmetric multithreading (SMT).
|
||||
Equivalent to smt=1.
|
||||
|
||||
[KNL,x86] Disable symmetric multithreading (SMT).
|
||||
nosmt=force: Force disable SMT, cannot be undone
|
||||
via the sysfs control file.
|
||||
|
||||
nospectre_v2 [X86] Disable all mitigations for the Spectre variant 2
|
||||
(indirect branch prediction) vulnerability. System may
|
||||
allow data leaks with this option, which is equivalent
|
||||
|
|
Documentation/l1tf.rst (new file, 610 lines)
|
@ -0,0 +1,610 @@
|
|||
L1TF - L1 Terminal Fault
|
||||
========================
|
||||
|
||||
L1 Terminal Fault is a hardware vulnerability which allows unprivileged
|
||||
speculative access to data which is available in the Level 1 Data Cache
|
||||
when the page table entry controlling the virtual address, which is used
|
||||
for the access, has the Present bit cleared or other reserved bits set.
|
||||
|
||||
Affected processors
|
||||
-------------------
|
||||
|
||||
This vulnerability affects a wide range of Intel processors. The
|
||||
vulnerability is not present on:
|
||||
|
||||
- Processors from AMD, Centaur and other non Intel vendors
|
||||
|
||||
- Older processor models, where the CPU family is < 6
|
||||
|
||||
- A range of Intel ATOM processors (Cedarview, Cloverview, Lincroft,
|
||||
Penwell, Pineview, Silvermont, Airmont, Merrifield)
|
||||
|
||||
- The Intel XEON PHI family
|
||||
|
||||
- Intel processors which have the ARCH_CAP_RDCL_NO bit set in the
|
||||
IA32_ARCH_CAPABILITIES MSR. If the bit is set the CPU is not affected
|
||||
by the Meltdown vulnerability either. These CPUs should become
|
||||
available by end of 2018.
|
||||
|
||||
Whether a processor is affected or not can be read out from the L1TF
|
||||
vulnerability file in sysfs. See :ref:`l1tf_sys_info`.
|
||||
|
||||
Related CVEs
|
||||
------------
|
||||
|
||||
The following CVE entries are related to the L1TF vulnerability:
|
||||
|
||||
============= ================= ==============================
|
||||
CVE-2018-3615 L1 Terminal Fault SGX related aspects
|
||||
CVE-2018-3620 L1 Terminal Fault OS, SMM related aspects
|
||||
CVE-2018-3646 L1 Terminal Fault Virtualization related aspects
|
||||
============= ================= ==============================
|
||||
|
||||
Problem
|
||||
-------
|
||||
|
||||
If an instruction accesses a virtual address for which the relevant page
|
||||
table entry (PTE) has the Present bit cleared or other reserved bits set,
|
||||
then speculative execution ignores the invalid PTE and loads the referenced
|
||||
data if it is present in the Level 1 Data Cache, as if the page referenced
|
||||
by the address bits in the PTE was still present and accessible.
|
||||
|
||||
While this is a purely speculative mechanism and the instruction will raise
|
||||
a page fault when it is retired eventually, the pure act of loading the
|
||||
data and making it available to other speculative instructions opens up the
|
||||
opportunity for side channel attacks to unprivileged malicious code,
|
||||
similar to the Meltdown attack.
|
||||
|
||||
While Meltdown breaks the user space to kernel space protection, L1TF
|
||||
allows to attack any physical memory address in the system and the attack
|
||||
works across all protection domains. It allows an attack of SGX and also
|
||||
works from inside virtual machines because the speculation bypasses the
|
||||
extended page table (EPT) protection mechanism.
|
||||
|
||||
|
||||
Attack scenarios
|
||||
----------------
|
||||
|
||||
1. Malicious user space
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Operating Systems store arbitrary information in the address bits of a
|
||||
PTE which is marked non present. This allows a malicious user space
|
||||
application to attack the physical memory to which these PTEs resolve.
|
||||
In some cases user-space can maliciously influence the information
|
||||
encoded in the address bits of the PTE, thus making attacks more
|
||||
deterministic and more practical.
|
||||
|
||||
The Linux kernel contains a mitigation for this attack vector, PTE
|
||||
inversion, which is permanently enabled and has no performance
|
||||
impact. The kernel ensures that the address bits of PTEs, which are not
|
||||
marked present, never point to cacheable physical memory space.
|
||||
|
||||
A system with an up to date kernel is protected against attacks from
|
||||
malicious user space applications.
|
||||
|
||||
2. Malicious guest in a virtual machine
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The fact that L1TF breaks all domain protections allows malicious guest
|
||||
OSes, which can control the PTEs directly, and malicious guest user
|
||||
space applications, which run on an unprotected guest kernel lacking the
|
||||
PTE inversion mitigation for L1TF, to attack physical host memory.
|
||||
|
||||
A special aspect of L1TF in the context of virtualization is symmetric
|
||||
multi threading (SMT). The Intel implementation of SMT is called
|
||||
HyperThreading. The fact that Hyperthreads on the affected processors
|
||||
share the L1 Data Cache (L1D) is important for this. As the flaw allows
|
||||
only to attack data which is present in L1D, a malicious guest running
|
||||
on one Hyperthread can attack the data which is brought into the L1D by
|
||||
the context which runs on the sibling Hyperthread of the same physical
|
||||
core. This context can be host OS, host user space or a different guest.
|
||||
|
||||
If the processor does not support Extended Page Tables, the attack is
|
||||
only possible, when the hypervisor does not sanitize the content of the
|
||||
effective (shadow) page tables.
|
||||
|
||||
While solutions exist to mitigate these attack vectors fully, these
|
||||
mitigations are not enabled by default in the Linux kernel because they
|
||||
can affect performance significantly. The kernel provides several
|
||||
mechanisms which can be utilized to address the problem depending on the
|
||||
deployment scenario. The mitigations, their protection scope and impact
|
||||
are described in the next sections.
|
||||
|
||||
The default mitigations and the rationale for choosing them are explained
|
||||
at the end of this document. See :ref:`default_mitigations`.
|
||||
|
||||
.. _l1tf_sys_info:
|
||||
|
||||
L1TF system information
|
||||
-----------------------
|
||||
|
||||
The Linux kernel provides a sysfs interface to enumerate the current L1TF
|
||||
status of the system: whether the system is vulnerable, and which
|
||||
mitigations are active. The relevant sysfs file is:
|
||||
|
||||
/sys/devices/system/cpu/vulnerabilities/l1tf
|
||||
|
||||
The possible values in this file are:
|
||||
|
||||
=========================== ===============================
|
||||
'Not affected' The processor is not vulnerable
|
||||
'Mitigation: PTE Inversion' The host protection is active
|
||||
=========================== ===============================
|
||||
|
||||
If KVM/VMX is enabled and the processor is vulnerable then the following
|
||||
information is appended to the 'Mitigation: PTE Inversion' part:
|
||||
|
||||
- SMT status:
|
||||
|
||||
===================== ================
|
||||
'VMX: SMT vulnerable' SMT is enabled
|
||||
'VMX: SMT disabled' SMT is disabled
|
||||
===================== ================
|
||||
|
||||
- L1D Flush mode:
|
||||
|
||||
================================ ====================================
|
||||
'L1D vulnerable' L1D flushing is disabled
|
||||
|
||||
'L1D conditional cache flushes' L1D flush is conditionally enabled
|
||||
|
||||
'L1D cache flushes' L1D flush is unconditionally enabled
|
||||
================================ ====================================
|
||||
|
||||
The resulting grade of protection is discussed in the following sections.
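
As an illustration only (not part of this patch set), a minimal userspace
sketch that reads the status file and flags the SMT-vulnerable case could
look like the following; the file is a one-line text file and the string
checks simply mirror the tables above:

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          char status[256] = "";
          FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/l1tf", "r");

          if (!f) {                       /* kernel without L1TF reporting */
                  perror("l1tf sysfs file");
                  return 1;
          }
          if (!fgets(status, sizeof(status), f)) {
                  fclose(f);
                  return 1;
          }
          fclose(f);

          printf("l1tf: %s", status);
          if (strstr(status, "VMX: SMT vulnerable"))
                  printf("SMT enabled on a vulnerable CPU; see the SMT control section\n");
          return 0;
  }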
|
||||
|
||||
|
||||
Host mitigation mechanism
|
||||
-------------------------
|
||||
|
||||
The kernel is unconditionally protected against L1TF attacks from malicious
|
||||
user space running on the host.
|
||||
|
||||
|
||||
Guest mitigation mechanisms
|
||||
---------------------------
|
||||
|
||||
.. _l1d_flush:
|
||||
|
||||
1. L1D flush on VMENTER
|
||||
^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To make sure that a guest cannot attack data which is present in the L1D
|
||||
the hypervisor flushes the L1D before entering the guest.
|
||||
|
||||
Flushing the L1D evicts not only the data which should not be accessed
|
||||
by a potentially malicious guest, it also flushes the guest
|
||||
data. Flushing the L1D has a performance impact as the processor has to
|
||||
bring the flushed guest data back into the L1D. Depending on the
|
||||
frequency of VMEXIT/VMENTER and the type of computations in the guest
|
||||
performance degradation in the range of 1% to 50% has been observed. For
|
||||
scenarios where guest VMEXIT/VMENTER are rare the performance impact is
|
||||
minimal. Virtio and mechanisms like posted interrupts are designed to
|
||||
confine the VMEXITs to a bare minimum, but specific configurations and
|
||||
application scenarios might still suffer from a high VMEXIT rate.
|
||||
|
||||
The kernel provides two L1D flush modes:
|
||||
- conditional ('cond')
|
||||
- unconditional ('always')
|
||||
|
||||
The conditional mode avoids L1D flushing after VMEXITs which execute
|
||||
only audited code paths before the corresponding VMENTER. These code
|
||||
paths have been verified not to expose secrets or other
|
||||
interesting data to an attacker, but they can leak information about the
|
||||
address space layout of the hypervisor.
|
||||
|
||||
Unconditional mode flushes L1D on all VMENTER invocations and provides
|
||||
maximum protection. It has a higher overhead than the conditional
|
||||
mode. The overhead cannot be quantified correctly as it depends on the
|
||||
workload scenario and the resulting number of VMEXITs.
|
||||
|
||||
The general recommendation is to enable L1D flush on VMENTER. The kernel
|
||||
defaults to conditional mode on affected processors.
|
||||
|
||||
**Note**, that L1D flush does not prevent the SMT problem because the
|
||||
sibling thread will also bring back its data into the L1D which makes it
|
||||
attackable again.
|
||||
|
||||
L1D flush can be controlled by the administrator via the kernel command
|
||||
line and sysfs control files. See :ref:`mitigation_control_command_line`
|
||||
and :ref:`mitigation_control_kvm`.
|
||||
|
||||
.. _guest_confinement:
|
||||
|
||||
2. Guest VCPU confinement to dedicated physical cores
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
To address the SMT problem, it is possible to make a guest or a group of
|
||||
guests affine to one or more physical cores. The proper mechanism for
|
||||
that is to utilize exclusive cpusets to ensure that no other guest or
|
||||
host tasks can run on these cores.
|
||||
|
||||
If only a single guest or related guests run on sibling SMT threads on
|
||||
the same physical core then they can only attack their own memory and
|
||||
restricted parts of the host memory.
|
||||
|
||||
Host memory is attackable, when one of the sibling SMT threads runs in
|
||||
host OS (hypervisor) context and the other in guest context. The amount
|
||||
of valuable information from the host OS context depends on the context
|
||||
which the host OS executes, i.e. interrupts, soft interrupts and kernel
|
||||
threads. The amount of valuable data from these contexts cannot be
|
||||
declared as non-interesting for an attacker without deep inspection of
|
||||
the code.
|
||||
|
||||
**Note**, that assigning guests to a fixed set of physical cores affects
|
||||
the ability of the scheduler to do load balancing and might have
|
||||
negative effects on CPU utilization depending on the hosting
|
||||
scenario. Disabling SMT might be a viable alternative for particular
|
||||
scenarios.
|
||||
|
||||
For further information about confining guests to a single or to a group
|
||||
of cores consult the cpusets documentation:
|
||||
|
||||
https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt
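
The cpuset setup itself is ordinary cgroup file I/O. The following is a
hedged sketch only: the cgroup-v1 mount point /sys/fs/cgroup/cpuset, the
cpuset name "vm0" and the CPU list "2-3" are assumptions made for the
example, not values taken from this document.

  #include <stdio.h>
  #include <stdlib.h>
  #include <sys/stat.h>

  static void write_str(const char *path, const char *val)
  {
          FILE *f = fopen(path, "w");

          if (!f) {
                  perror(path);
                  exit(1);
          }
          fputs(val, f);
          if (fclose(f)) {
                  perror(path);
                  exit(1);
          }
  }

  int main(int argc, char **argv)
  {
          if (argc != 2) {
                  fprintf(stderr, "usage: %s <vm pid>\n", argv[0]);
                  return 1;
          }
          /* Create an exclusive cpuset and move the whole VM process into it. */
          mkdir("/sys/fs/cgroup/cpuset/vm0", 0755);
          write_str("/sys/fs/cgroup/cpuset/vm0/cpuset.mems", "0");
          write_str("/sys/fs/cgroup/cpuset/vm0/cpuset.cpus", "2-3");
          write_str("/sys/fs/cgroup/cpuset/vm0/cpuset.cpu_exclusive", "1");
          write_str("/sys/fs/cgroup/cpuset/vm0/cgroup.procs", argv[1]);
          return 0;
  }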
|
||||
|
||||
.. _interrupt_isolation:
|
||||
|
||||
3. Interrupt affinity
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Interrupts can be made affine to logical CPUs. This is not universally
|
||||
true because there are types of interrupts which are truly per CPU
|
||||
interrupts, e.g. the local timer interrupt. Aside from that, multi queue
|
||||
devices affine their interrupts to single CPUs or groups of CPUs per
|
||||
queue without allowing the administrator to control the affinities.
|
||||
|
||||
Moving the interrupts, which can be affinity controlled, away from CPUs
|
||||
which run untrusted guests, reduces the attack vector space.
|
||||
|
||||
Whether the interrupts which are affine to CPUs that run untrusted
|
||||
guests provide interesting data for an attacker depends on the system
|
||||
configuration and the scenarios which run on the system. While for some
|
||||
of the interrupts it can be assumed that they won't expose interesting
|
||||
information beyond exposing hints about the host OS memory layout, there
|
||||
is no way to make general assumptions.
|
||||
|
||||
Interrupt affinity can be controlled by the administrator via the
|
||||
/proc/irq/$NR/smp_affinity[_list] files. Limited documentation is
|
||||
available at:
|
||||
|
||||
https://www.kernel.org/doc/Documentation/IRQ-affinity.txt
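
As a sketch only (the IRQ number and CPU list are command line arguments,
nothing here is mandated by this document), re-pinning a single
controllable interrupt can be done by writing a CPU list to that file;
interrupts whose affinity cannot be changed simply reject the write:

  #include <stdio.h>

  int main(int argc, char **argv)
  {
          char path[64];
          FILE *f;

          if (argc != 3) {
                  fprintf(stderr, "usage: %s <irq> <cpu-list, e.g. 0-1>\n", argv[0]);
                  return 1;
          }
          snprintf(path, sizeof(path), "/proc/irq/%s/smp_affinity_list", argv[1]);
          f = fopen(path, "w");
          if (!f) {
                  perror(path);
                  return 1;
          }
          if (fputs(argv[2], f) == EOF || fclose(f) == EOF) {
                  perror(path);   /* affinity not controllable for this IRQ */
                  return 1;
          }
          return 0;
  }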
|
||||
|
||||
.. _smt_control:
|
||||
|
||||
4. SMT control
|
||||
^^^^^^^^^^^^^^
|
||||
|
||||
To prevent the SMT issues of L1TF it might be necessary to disable SMT
|
||||
completely. Disabling SMT can have a significant performance impact, but
|
||||
the impact depends on the hosting scenario and the type of workloads.
|
||||
The impact of disabling SMT also needs to be weighed against the impact
|
||||
of other mitigation solutions like confining guests to dedicated cores.
|
||||
|
||||
The kernel provides a sysfs interface to retrieve the status of SMT and
|
||||
to control it. It also provides a kernel command line interface to
|
||||
control SMT.
|
||||
|
||||
The kernel command line interface consists of the following options:
|
||||
|
||||
=========== ==========================================================
|
||||
nosmt Affects the bring up of the secondary CPUs during boot. The
|
||||
kernel tries to bring all present CPUs online during the
|
||||
boot process. "nosmt" makes sure that from each physical
|
||||
core only one - the so called primary (hyper) thread is
|
||||
activated. Due to a design flaw of Intel processors related
|
||||
to Machine Check Exceptions the non primary siblings have
|
||||
to be brought up at least partially and are then shut down
|
||||
again. "nosmt" can be undone via the sysfs interface.
|
||||
|
||||
nosmt=force Has the same effect as "nosmt" but it does not allow to
|
||||
undo the SMT disable via the sysfs interface.
|
||||
=========== ==========================================================
|
||||
|
||||
The sysfs interface provides two files:
|
||||
|
||||
- /sys/devices/system/cpu/smt/control
|
||||
- /sys/devices/system/cpu/smt/active
|
||||
|
||||
/sys/devices/system/cpu/smt/control:
|
||||
|
||||
This file allows to read out the SMT control state and provides the
|
||||
ability to disable or (re)enable SMT. The possible states are:
|
||||
|
||||
============== ===================================================
|
||||
on SMT is supported by the CPU and enabled. All
|
||||
logical CPUs can be onlined and offlined without
|
||||
restrictions.
|
||||
|
||||
off SMT is supported by the CPU and disabled. Only
|
||||
the so called primary SMT threads can be onlined
|
||||
and offlined without restrictions. An attempt to
|
||||
online a non-primary sibling is rejected
|
||||
|
||||
forceoff Same as 'off' but the state cannot be controlled.
|
||||
Attempts to write to the control file are rejected.
|
||||
|
||||
notsupported The processor does not support SMT. It's therefore
|
||||
not affected by the SMT implications of L1TF.
|
||||
Attempts to write to the control file are rejected.
|
||||
============== ===================================================
|
||||
|
||||
The possible states which can be written into this file to control SMT
|
||||
state are:
|
||||
|
||||
- on
|
||||
- off
|
||||
- forceoff
|
||||
|
||||
/sys/devices/system/cpu/smt/active:
|
||||
|
||||
This file reports whether SMT is enabled and active, i.e. if on any
|
||||
physical core two or more sibling threads are online.
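
A minimal sketch of using these two files (illustrative only; switching
the state needs root and fails when the control file reads 'forceoff' or
'notsupported') might be:

  #include <stdio.h>

  static void show(const char *path)
  {
          char buf[32] = "";
          FILE *f = fopen(path, "r");

          if (f && fgets(buf, sizeof(buf), f))
                  printf("%s: %s", path, buf);
          if (f)
                  fclose(f);
  }

  int main(int argc, char **argv)
  {
          show("/sys/devices/system/cpu/smt/control");
          show("/sys/devices/system/cpu/smt/active");

          if (argc == 2) {        /* "on", "off" or "forceoff" */
                  FILE *f = fopen("/sys/devices/system/cpu/smt/control", "w");

                  if (!f || fputs(argv[1], f) == EOF || fclose(f)) {
                          perror("smt/control");
                          return 1;
                  }
          }
          return 0;
  }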
|
||||
|
||||
SMT control is also possible at boot time via the l1tf kernel command
|
||||
line parameter in combination with L1D flush control. See
|
||||
:ref:`mitigation_control_command_line`.
|
||||
|
||||
5. Disabling EPT
|
||||
^^^^^^^^^^^^^^^^
|
||||
|
||||
Disabling EPT for virtual machines provides full mitigation for L1TF even
|
||||
with SMT enabled, because the effective page tables for guests are
|
||||
managed and sanitized by the hypervisor. Though disabling EPT has a
|
||||
significant performance impact especially when the Meltdown mitigation
|
||||
KPTI is enabled.
|
||||
|
||||
EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
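
For a quick check from userspace, the current EPT setting of a loaded
kvm-intel module is visible as a read-only module parameter. The sketch
below assumes the usual /sys/module path and simply prints the value:

  #include <stdio.h>

  int main(void)
  {
          char val[8] = "";
          FILE *f = fopen("/sys/module/kvm_intel/parameters/ept", "r");

          if (!f) {               /* kvm_intel not loaded */
                  perror("kvm_intel ept parameter");
                  return 1;
          }
          if (fgets(val, sizeof(val), f))
                  printf("kvm-intel.ept = %s", val);      /* 'Y' or 'N' */
          fclose(f);
          return 0;
  }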
|
||||
|
||||
There is ongoing research and development for new mitigation mechanisms to
|
||||
address the performance impact of disabling SMT or EPT.
|
||||
|
||||
.. _mitigation_control_command_line:
|
||||
|
||||
Mitigation control on the kernel command line
|
||||
---------------------------------------------
|
||||
|
||||
The kernel command line allows to control the L1TF mitigations at boot
|
||||
time with the option "l1tf=". The valid arguments for this option are:
|
||||
|
||||
============ =============================================================
|
||||
full Provides all available mitigations for the L1TF
|
||||
vulnerability. Disables SMT and enables all mitigations in
|
||||
the hypervisors, i.e. unconditional L1D flushing
|
||||
|
||||
SMT control and L1D flush control via the sysfs interface
|
||||
is still possible after boot. Hypervisors will issue a
|
||||
warning when the first VM is started in a potentially
|
||||
insecure configuration, i.e. SMT enabled or L1D flush
|
||||
disabled.
|
||||
|
||||
full,force Same as 'full', but disables SMT and L1D flush runtime
|
||||
control. Implies the 'nosmt=force' command line option.
|
||||
(i.e. sysfs control of SMT is disabled.)
|
||||
|
||||
flush Leaves SMT enabled and enables the default hypervisor
|
||||
mitigation, i.e. conditional L1D flushing
|
||||
|
||||
SMT control and L1D flush control via the sysfs interface
|
||||
is still possible after boot. Hypervisors will issue a
|
||||
warning when the first VM is started in a potentially
|
||||
insecure configuration, i.e. SMT enabled or L1D flush
|
||||
disabled.
|
||||
|
||||
flush,nosmt Disables SMT and enables the default hypervisor mitigation,
|
||||
i.e. conditional L1D flushing.
|
||||
|
||||
SMT control and L1D flush control via the sysfs interface
|
||||
is still possible after boot. Hypervisors will issue a
|
||||
warning when the first VM is started in a potentially
|
||||
insecure configuration, i.e. SMT enabled or L1D flush
|
||||
disabled.
|
||||
|
||||
flush,nowarn Same as 'flush', but hypervisors will not warn when a VM is
|
||||
started in a potentially insecure configuration.
|
||||
|
||||
off Disables hypervisor mitigations and doesn't emit any
|
||||
warnings.
|
||||
============ =============================================================
|
||||
|
||||
The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
|
||||
|
||||
|
||||
.. _mitigation_control_kvm:
|
||||
|
||||
Mitigation control for KVM - module parameter
|
||||
---------------------------------------------
|
||||
|
||||
The KVM hypervisor mitigation mechanism, flushing the L1D cache when
|
||||
entering a guest, can be controlled with a module parameter.
|
||||
|
||||
The option/parameter is "kvm-intel.vmentry_l1d_flush=". It takes the
|
||||
following arguments:
|
||||
|
||||
============ ==============================================================
|
||||
always L1D cache flush on every VMENTER.
|
||||
|
||||
cond Flush L1D on VMENTER only when the code between VMEXIT and
|
||||
VMENTER can leak host memory which is considered
|
||||
interesting for an attacker. This still can leak host memory
|
||||
which allows e.g. to determine the host's address space layout.
|
||||
|
||||
never Disables the mitigation
|
||||
============ ==============================================================
|
||||
|
||||
The parameter can be provided on the kernel command line, as a module
|
||||
parameter when loading the modules and at runtime modified via the sysfs
|
||||
file:
|
||||
|
||||
/sys/module/kvm_intel/parameters/vmentry_l1d_flush
|
||||
|
||||
The default is 'cond'. If 'l1tf=full,force' is given on the kernel command
|
||||
line, then 'always' is enforced and the kvm-intel.vmentry_l1d_flush
|
||||
module parameter is ignored and writes to the sysfs file are rejected.
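
As an illustrative sketch (not part of the patch set), the current mode
can be read back and, outside of the 'l1tf=full,force' case, changed
through the same sysfs file:

  #include <stdio.h>

  #define FLUSH_PARAM "/sys/module/kvm_intel/parameters/vmentry_l1d_flush"

  int main(int argc, char **argv)
  {
          char cur[32] = "";
          FILE *f = fopen(FLUSH_PARAM, "r");

          if (!f) {                       /* kvm_intel not loaded */
                  perror(FLUSH_PARAM);
                  return 1;
          }
          if (fgets(cur, sizeof(cur), f))
                  printf("current mode: %s", cur);
          fclose(f);

          if (argc == 2) {                /* "always", "cond" or "never" */
                  f = fopen(FLUSH_PARAM, "w");
                  if (!f || fputs(argv[1], f) == EOF || fclose(f)) {
                          perror(FLUSH_PARAM);    /* rejected under l1tf=full,force */
                          return 1;
                  }
          }
          return 0;
  }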
|
||||
|
||||
|
||||
Mitigation selection guide
|
||||
--------------------------
|
||||
|
||||
1. No virtualization in use
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
The system is protected by the kernel unconditionally and no further
|
||||
action is required.
|
||||
|
||||
2. Virtualization with trusted guests
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
If the guest comes from a trusted source and the guest OS kernel is
|
||||
guaranteed to have the L1TF mitigations in place the system is fully
|
||||
protected against L1TF and no further action is required.
|
||||
|
||||
To avoid the overhead of the default L1D flushing on VMENTER the
|
||||
administrator can disable the flushing via the kernel command line and
|
||||
sysfs control files. See :ref:`mitigation_control_command_line` and
|
||||
:ref:`mitigation_control_kvm`.
|
||||
|
||||
|
||||
3. Virtualization with untrusted guests
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
3.1. SMT not supported or disabled
|
||||
""""""""""""""""""""""""""""""""""
|
||||
|
||||
If SMT is not supported by the processor or disabled in the BIOS or by
|
||||
the kernel, it's only required to enforce L1D flushing on VMENTER.
|
||||
|
||||
Conditional L1D flushing is the default behaviour and can be tuned. See
|
||||
:ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
|
||||
|
||||
3.2. EPT not supported or disabled
|
||||
""""""""""""""""""""""""""""""""""
|
||||
|
||||
If EPT is not supported by the processor or disabled in the hypervisor,
|
||||
the system is fully protected. SMT can stay enabled and L1D flushing on
|
||||
VMENTER is not required.
|
||||
|
||||
EPT can be disabled in the hypervisor via the 'kvm-intel.ept' parameter.
|
||||
|
||||
3.3. SMT and EPT supported and active
|
||||
"""""""""""""""""""""""""""""""""""""
|
||||
|
||||
If SMT and EPT are supported and active then various degrees of
|
||||
mitigations can be employed:
|
||||
|
||||
- L1D flushing on VMENTER:
|
||||
|
||||
L1D flushing on VMENTER is the minimal protection requirement, but it
|
||||
is only potent in combination with other mitigation methods.
|
||||
|
||||
Conditional L1D flushing is the default behaviour and can be tuned. See
|
||||
:ref:`mitigation_control_command_line` and :ref:`mitigation_control_kvm`.
|
||||
|
||||
- Guest confinement:
|
||||
|
||||
Confinement of guests to a single or a group of physical cores which
|
||||
are not running any other processes, can reduce the attack surface
|
||||
significantly, but interrupts, soft interrupts and kernel threads can
|
||||
still expose valuable data to a potential attacker. See
|
||||
:ref:`guest_confinement`.
|
||||
|
||||
- Interrupt isolation:
|
||||
|
||||
Isolating the guest CPUs from interrupts can reduce the attack surface
|
||||
further, but still allows a malicious guest to explore a limited amount
|
||||
of host physical memory. This can at least be used to gain knowledge
|
||||
about the host address space layout. The interrupts which have a fixed
|
||||
affinity to the CPUs which run the untrusted guests can depending on
|
||||
the scenario still trigger soft interrupts and schedule kernel threads
|
||||
which might expose valuable information. See
|
||||
:ref:`interrupt_isolation`.
|
||||
|
||||
The above three mitigation methods combined can provide protection to a
|
||||
certain degree, but the risk of the remaining attack surface has to be
|
||||
carefully analyzed. For full protection the following methods are
|
||||
available:
|
||||
|
||||
- Disabling SMT:
|
||||
|
||||
Disabling SMT and enforcing the L1D flushing provides the maximum
|
||||
amount of protection. This mitigation is not depending on any of the
|
||||
above mitigation methods.
|
||||
|
||||
SMT control and L1D flushing can be tuned by the command line
|
||||
parameters 'nosmt', 'l1tf', 'kvm-intel.vmentry_l1d_flush' and at run
|
||||
time with the matching sysfs control files. See :ref:`smt_control`,
|
||||
:ref:`mitigation_control_command_line` and
|
||||
:ref:`mitigation_control_kvm`.
|
||||
|
||||
- Disabling EPT:
|
||||
|
||||
Disabling EPT provides the maximum amount of protection as well. It is
|
||||
not depending on any of the above mitigation methods. SMT can stay
|
||||
enabled and L1D flushing is not required, but the performance impact is
|
||||
significant.
|
||||
|
||||
EPT can be disabled in the hypervisor via the 'kvm-intel.ept'
|
||||
parameter.
|
||||
|
||||
3.4. Nested virtual machines
|
||||
""""""""""""""""""""""""""""
|
||||
|
||||
When nested virtualization is in use, three operating systems are involved:
|
||||
the bare metal hypervisor, the nested hypervisor and the nested virtual
|
||||
machine. VMENTER operations from the nested hypervisor into the nested
|
||||
guest will always be processed by the bare metal hypervisor. If KVM is the
|
||||
bare metal hypervisor it will:
|
||||
|
||||
- Flush the L1D cache on every switch from the nested hypervisor to the
|
||||
nested virtual machine, so that the nested hypervisor's secrets are not
|
||||
exposed to the nested virtual machine;
|
||||
|
||||
- Flush the L1D cache on every switch from the nested virtual machine to
|
||||
the nested hypervisor; this is a complex operation, and flushing the L1D
|
||||
cache prevents the bare metal hypervisor's secrets from being exposed to the
|
||||
nested virtual machine;
|
||||
|
||||
- Instruct the nested hypervisor to not perform any L1D cache flush. This
|
||||
is an optimization to avoid double L1D flushing.
|
||||
|
||||
|
||||
.. _default_mitigations:
|
||||
|
||||
Default mitigations
|
||||
-------------------
|
||||
|
||||
The kernel default mitigations for vulnerable processors are:
|
||||
|
||||
- PTE inversion to protect against malicious user space. This is done
|
||||
unconditionally and cannot be controlled.
|
||||
|
||||
- L1D conditional flushing on VMENTER when EPT is enabled for
|
||||
a guest.
|
||||
|
||||
The kernel does not by default enforce the disabling of SMT, which leaves
|
||||
SMT systems vulnerable when running untrusted guests with EPT enabled.
|
||||
|
||||
The rationale for this choice is:
|
||||
|
||||
- Force disabling SMT can break existing setups, especially with
|
||||
unattended updates.
|
||||
|
||||
- If regular users run untrusted guests on their machine, then L1TF is
|
||||
just an add on to other malware which might be embedded in an untrusted
|
||||
guest, e.g. spam-bots or attacks on the local network.
|
||||
|
||||
There is no technical way to prevent a user from running untrusted code
|
||||
on their machines blindly.
|
||||
|
||||
- It's technically extremely unlikely and from today's knowledge even
|
||||
impossible that L1TF can be exploited via the most popular attack
|
||||
mechanisms like JavaScript because these mechanisms have no way to
|
||||
control PTEs. If this were possible and no other mitigation were
|
||||
available, then the default might be different.
|
||||
|
||||
- The administrators of cloud and hosting setups have to carefully
|
||||
analyze the risk for their scenarios and make the appropriate
|
||||
mitigation choices, which might even vary across their deployed
|
||||
machines and also result in other changes of their overall setup.
|
||||
There is no way for the kernel to provide a sensible default for this
|
||||
kind of scenario.
|
|
@ -122,14 +122,15 @@ KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
|
|||
privileged user (CAP_SYS_ADMIN).
|
||||
|
||||
|
||||
4.3 KVM_GET_MSR_INDEX_LIST
|
||||
4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST
|
||||
|
||||
Capability: basic
|
||||
Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST
|
||||
Architectures: x86
|
||||
Type: system
|
||||
Type: system ioctl
|
||||
Parameters: struct kvm_msr_list (in/out)
|
||||
Returns: 0 on success; -1 on error
|
||||
Errors:
|
||||
EFAULT: the msr index list cannot be read from or written to
|
||||
E2BIG: the msr index list is too big to fit in the array specified by
|
||||
the user.
|
||||
|
||||
|
@ -138,16 +139,23 @@ struct kvm_msr_list {
|
|||
__u32 indices[0];
|
||||
};
|
||||
|
||||
This ioctl returns the guest msrs that are supported. The list varies
|
||||
by kvm version and host processor, but does not change otherwise. The
|
||||
user fills in the size of the indices array in nmsrs, and in return
|
||||
kvm adjusts nmsrs to reflect the actual number of msrs and fills in
|
||||
the indices array with their numbers.
|
||||
The user fills in the size of the indices array in nmsrs, and in return
|
||||
kvm adjusts nmsrs to reflect the actual number of msrs and fills in the
|
||||
indices array with their numbers.
|
||||
|
||||
KVM_GET_MSR_INDEX_LIST returns the guest msrs that are supported. The list
|
||||
varies by kvm version and host processor, but does not change otherwise.
|
||||
|
||||
Note: if kvm indicates support for MCE (KVM_CAP_MCE), then the MCE bank MSRs are
|
||||
not returned in the MSR list, as different vcpus can have a different number
|
||||
of banks, as set via the KVM_X86_SETUP_MCE ioctl.
|
||||
|
||||
KVM_GET_MSR_FEATURE_INDEX_LIST returns the list of MSRs that can be passed
|
||||
to the KVM_GET_MSRS system ioctl. This lets userspace probe host capabilities
|
||||
and processor features that are exposed via MSRs (e.g., VMX capabilities).
|
||||
This list also varies by kvm version and host processor, but does not change
|
||||
otherwise.
|
||||
|
||||
|
||||
4.4 KVM_CHECK_EXTENSION
|
||||
|
||||
|
@ -474,14 +482,22 @@ Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead.
|
|||
|
||||
4.18 KVM_GET_MSRS
|
||||
|
||||
Capability: basic
|
||||
Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system)
|
||||
Architectures: x86
|
||||
Type: vcpu ioctl
|
||||
Type: system ioctl, vcpu ioctl
|
||||
Parameters: struct kvm_msrs (in/out)
|
||||
Returns: 0 on success, -1 on error
|
||||
Returns: number of msrs successfully returned;
|
||||
-1 on error
|
||||
|
||||
When used as a system ioctl:
|
||||
Reads the values of MSR-based features that are available for the VM. This
|
||||
is similar to KVM_GET_SUPPORTED_CPUID, but it returns MSR indices and values.
|
||||
The list of msr-based features can be obtained using KVM_GET_MSR_FEATURE_INDEX_LIST
|
||||
in a system ioctl.
|
||||
|
||||
When used as a vcpu ioctl:
|
||||
Reads model-specific registers from the vcpu. Supported msr indices can
|
||||
be obtained using KVM_GET_MSR_INDEX_LIST.
|
||||
be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl.
|
||||
|
||||
struct kvm_msrs {
|
||||
__u32 nmsrs; /* number of msrs in entries */
|
||||
|
|
Makefile (2 lines changed)
|
@ -1,6 +1,6 @@
|
|||
VERSION = 4
|
||||
PATCHLEVEL = 9
|
||||
SUBLEVEL = 119
|
||||
SUBLEVEL = 120
|
||||
EXTRAVERSION =
|
||||
NAME = Roaring Lionus
|
||||
|
||||
|
|
|
@ -5,6 +5,9 @@
|
|||
config KEXEC_CORE
|
||||
bool
|
||||
|
||||
config HOTPLUG_SMT
|
||||
bool
|
||||
|
||||
config OPROFILE
|
||||
tristate "OProfile system profiling"
|
||||
depends on PROFILING
|
||||
|
|
|
@ -1280,7 +1280,7 @@
|
|||
/* non-prefetchable memory */
|
||||
0x82000000 0 0x08000000 0x08000000 0 0x00f00000>;
|
||||
num-lanes = <1>;
|
||||
interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>,
|
||||
<&clks IMX6SX_CLK_PCIE_AXI>,
|
||||
<&clks IMX6SX_CLK_LVDS1_OUT>,
|
||||
|
|
|
@ -184,7 +184,7 @@ config PREFETCH
|
|||
|
||||
config MLONGCALLS
|
||||
bool "Enable the -mlong-calls compiler option for big kernels"
|
||||
def_bool y if (!MODULES)
|
||||
default y
|
||||
depends on PA8X00
|
||||
help
|
||||
If you configure the kernel to include many drivers built-in instead
|
||||
|
|
arch/parisc/include/asm/barrier.h (new file, 32 lines)
|
@ -0,0 +1,32 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef __ASM_BARRIER_H
|
||||
#define __ASM_BARRIER_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/* The synchronize caches instruction executes as a nop on systems in
|
||||
which all memory references are performed in order. */
|
||||
#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
|
||||
|
||||
#if defined(CONFIG_SMP)
|
||||
#define mb() do { synchronize_caches(); } while (0)
|
||||
#define rmb() mb()
|
||||
#define wmb() mb()
|
||||
#define dma_rmb() mb()
|
||||
#define dma_wmb() mb()
|
||||
#else
|
||||
#define mb() barrier()
|
||||
#define rmb() barrier()
|
||||
#define wmb() barrier()
|
||||
#define dma_rmb() barrier()
|
||||
#define dma_wmb() barrier()
|
||||
#endif
|
||||
|
||||
#define __smp_mb() mb()
|
||||
#define __smp_rmb() mb()
|
||||
#define __smp_wmb() mb()
|
||||
|
||||
#include <asm-generic/barrier.h>
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
#endif /* __ASM_BARRIER_H */
|
|
@ -481,6 +481,8 @@
|
|||
/* Release pa_tlb_lock lock without reloading lock address. */
|
||||
.macro tlb_unlock0 spc,tmp
|
||||
#ifdef CONFIG_SMP
|
||||
or,COND(=) %r0,\spc,%r0
|
||||
sync
|
||||
or,COND(=) %r0,\spc,%r0
|
||||
stw \spc,0(\tmp)
|
||||
#endif
|
||||
|
|
|
@ -354,6 +354,7 @@ ENDPROC_CFI(flush_data_cache_local)
|
|||
.macro tlb_unlock la,flags,tmp
|
||||
#ifdef CONFIG_SMP
|
||||
ldi 1,\tmp
|
||||
sync
|
||||
stw \tmp,0(\la)
|
||||
mtsm \flags
|
||||
#endif
|
||||
|
|
|
@ -633,6 +633,7 @@ cas_action:
|
|||
sub,<> %r28, %r25, %r0
|
||||
2: stw,ma %r24, 0(%r26)
|
||||
/* Free lock */
|
||||
sync
|
||||
stw,ma %r20, 0(%sr2,%r20)
|
||||
#if ENABLE_LWS_DEBUG
|
||||
/* Clear thread register indicator */
|
||||
|
@ -647,6 +648,7 @@ cas_action:
|
|||
3:
|
||||
/* Error occurred on load or store */
|
||||
/* Free lock */
|
||||
sync
|
||||
stw %r20, 0(%sr2,%r20)
|
||||
#if ENABLE_LWS_DEBUG
|
||||
stw %r0, 4(%sr2,%r20)
|
||||
|
@ -848,6 +850,7 @@ cas2_action:
|
|||
|
||||
cas2_end:
|
||||
/* Free lock */
|
||||
sync
|
||||
stw,ma %r20, 0(%sr2,%r20)
|
||||
/* Enable interrupts */
|
||||
ssm PSW_SM_I, %r0
|
||||
|
@ -858,6 +861,7 @@ cas2_end:
|
|||
22:
|
||||
/* Error occurred on load or store */
|
||||
/* Free lock */
|
||||
sync
|
||||
stw %r20, 0(%sr2,%r20)
|
||||
ssm PSW_SM_I, %r0
|
||||
ldo 1(%r0),%r28
|
||||
|
|
|
@ -147,6 +147,7 @@ config X86
|
|||
select HAVE_UID16 if X86_32 || IA32_EMULATION
|
||||
select HAVE_UNSTABLE_SCHED_CLOCK
|
||||
select HAVE_USER_RETURN_NOTIFIER
|
||||
select HOTPLUG_SMT if SMP
|
||||
select IRQ_FORCED_THREADING
|
||||
select MODULES_USE_ELF_RELA if X86_64
|
||||
select MODULES_USE_ELF_REL if X86_32
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
#include <asm/mpspec.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/idle.h>
|
||||
#include <asm/hardirq.h>
|
||||
|
||||
#define ARCH_APICTIMER_STOPS_ON_C3 1
|
||||
|
||||
|
@ -633,6 +634,13 @@ extern int default_check_phys_apicid_present(int phys_apicid);
|
|||
#endif
|
||||
|
||||
#endif /* CONFIG_X86_LOCAL_APIC */
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
bool apic_id_is_primary_thread(unsigned int id);
|
||||
#else
|
||||
static inline bool apic_id_is_primary_thread(unsigned int id) { return false; }
|
||||
#endif
|
||||
|
||||
extern void irq_enter(void);
|
||||
extern void irq_exit(void);
|
||||
|
||||
|
@ -640,6 +648,7 @@ static inline void entering_irq(void)
|
|||
{
|
||||
irq_enter();
|
||||
exit_idle();
|
||||
kvm_set_cpu_l1tf_flush_l1d();
|
||||
}
|
||||
|
||||
static inline void entering_ack_irq(void)
|
||||
|
@ -652,6 +661,7 @@ static inline void ipi_entering_ack_irq(void)
|
|||
{
|
||||
irq_enter();
|
||||
ack_APIC_irq();
|
||||
kvm_set_cpu_l1tf_flush_l1d();
|
||||
}
|
||||
|
||||
static inline void exiting_irq(void)
|
||||
|
|
|
@ -213,7 +213,7 @@
|
|||
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
|
||||
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
|
||||
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
|
||||
|
||||
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
|
||||
|
||||
/* Virtualization flags: Linux defined, word 8 */
|
||||
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
|
||||
|
@ -317,6 +317,7 @@
|
|||
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
|
||||
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
|
||||
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
|
||||
#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
|
||||
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
|
||||
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
|
||||
|
||||
|
@ -349,5 +350,6 @@
|
|||
#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
|
||||
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
|
||||
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
|
||||
#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
|
||||
|
||||
#endif /* _ASM_X86_CPUFEATURES_H */
|
||||
|
|
|
@ -3,8 +3,8 @@
|
|||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/setup.h>
|
||||
|
||||
static __always_inline __init void *dmi_alloc(unsigned len)
|
||||
|
|
|
@ -2,10 +2,12 @@
|
|||
#define _ASM_X86_HARDIRQ_H
|
||||
|
||||
#include <linux/threads.h>
|
||||
#include <linux/irq.h>
|
||||
|
||||
typedef struct {
|
||||
unsigned int __softirq_pending;
|
||||
u16 __softirq_pending;
|
||||
#if IS_ENABLED(CONFIG_KVM_INTEL)
|
||||
u8 kvm_cpu_l1tf_flush_l1d;
|
||||
#endif
|
||||
unsigned int __nmi_count; /* arch dependent */
|
||||
#ifdef CONFIG_X86_LOCAL_APIC
|
||||
unsigned int apic_timer_irqs; /* arch dependent */
|
||||
|
@ -60,4 +62,24 @@ extern u64 arch_irq_stat_cpu(unsigned int cpu);
|
|||
extern u64 arch_irq_stat(void);
|
||||
#define arch_irq_stat arch_irq_stat
|
||||
|
||||
|
||||
#if IS_ENABLED(CONFIG_KVM_INTEL)
|
||||
static inline void kvm_set_cpu_l1tf_flush_l1d(void)
|
||||
{
|
||||
__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 1);
|
||||
}
|
||||
|
||||
static inline void kvm_clear_cpu_l1tf_flush_l1d(void)
|
||||
{
|
||||
__this_cpu_write(irq_stat.kvm_cpu_l1tf_flush_l1d, 0);
|
||||
}
|
||||
|
||||
static inline bool kvm_get_cpu_l1tf_flush_l1d(void)
|
||||
{
|
||||
return __this_cpu_read(irq_stat.kvm_cpu_l1tf_flush_l1d);
|
||||
}
|
||||
#else /* !IS_ENABLED(CONFIG_KVM_INTEL) */
|
||||
static inline void kvm_set_cpu_l1tf_flush_l1d(void) { }
|
||||
#endif /* IS_ENABLED(CONFIG_KVM_INTEL) */
|
||||
|
||||
#endif /* _ASM_X86_HARDIRQ_H */
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
* Interrupt control:
|
||||
*/
|
||||
|
||||
/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
|
||||
extern inline unsigned long native_save_fl(void);
|
||||
extern inline unsigned long native_save_fl(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
#include <linux/tracepoint.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/irq_work.h>
|
||||
#include <linux/irq.h>
|
||||
|
||||
#include <linux/kvm.h>
|
||||
#include <linux/kvm_para.h>
|
||||
|
@ -485,6 +486,7 @@ struct kvm_vcpu_arch {
|
|||
u64 smbase;
|
||||
bool tpr_access_reporting;
|
||||
u64 ia32_xss;
|
||||
u64 microcode_version;
|
||||
|
||||
/*
|
||||
* Paging state of the vcpu
|
||||
|
@ -659,6 +661,9 @@ struct kvm_vcpu_arch {
|
|||
|
||||
int pending_ioapic_eoi;
|
||||
int pending_external_vector;
|
||||
|
||||
/* Flush the L1 Data cache for L1TF mitigation on VMENTER */
|
||||
bool l1tf_flush_l1d;
|
||||
};
|
||||
|
||||
struct kvm_lpage_info {
|
||||
|
@ -819,6 +824,7 @@ struct kvm_vcpu_stat {
|
|||
u64 signal_exits;
|
||||
u64 irq_window_exits;
|
||||
u64 nmi_window_exits;
|
||||
u64 l1d_flush;
|
||||
u64 halt_exits;
|
||||
u64 halt_successful_poll;
|
||||
u64 halt_attempted_poll;
|
||||
|
@ -1020,6 +1026,8 @@ struct kvm_x86_ops {
|
|||
void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
|
||||
|
||||
void (*setup_mce)(struct kvm_vcpu *vcpu);
|
||||
|
||||
int (*get_msr_feature)(struct kvm_msr_entry *entry);
|
||||
};
|
||||
|
||||
struct kvm_arch_async_pf {
|
||||
|
@ -1338,6 +1346,7 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
|
|||
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
|
||||
unsigned long address);
|
||||
|
||||
u64 kvm_get_arch_capabilities(void);
|
||||
void kvm_define_shared_msr(unsigned index, u32 msr);
|
||||
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
|
||||
|
||||
|
|
|
@ -63,12 +63,19 @@
|
|||
#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
|
||||
#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
|
||||
#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
|
||||
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH (1 << 3) /* Skip L1D flush on vmentry */
|
||||
#define ARCH_CAP_SSB_NO (1 << 4) /*
|
||||
* Not susceptible to Speculative Store Bypass
|
||||
* attack, so no Speculative Store Bypass
|
||||
* control required.
|
||||
*/
|
||||
|
||||
#define MSR_IA32_FLUSH_CMD 0x0000010b
|
||||
#define L1D_FLUSH (1 << 0) /*
|
||||
* Writeback and invalidate the
|
||||
* L1 data cache.
|
||||
*/
|
||||
|
||||
#define MSR_IA32_BBL_CR_CTL 0x00000119
|
||||
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
|
||||
|
||||
|
|
|
@ -28,8 +28,13 @@
|
|||
#define N_EXCEPTION_STACKS 1
|
||||
|
||||
#ifdef CONFIG_X86_PAE
|
||||
/* 44=32+12, the limit we can fit into an unsigned long pfn */
|
||||
#define __PHYSICAL_MASK_SHIFT 44
|
||||
/*
|
||||
* This is beyond the 44 bit limit imposed by the 32bit long pfns,
|
||||
* but we need the full mask to make sure inverted PROT_NONE
|
||||
* entries have all the host bits set in a guest.
|
||||
* The real limit is still 44 bits.
|
||||
*/
|
||||
#define __PHYSICAL_MASK_SHIFT 52
|
||||
#define __VIRTUAL_MASK_SHIFT 32
|
||||
|
||||
#else /* !CONFIG_X86_PAE */
|
||||
|
|
|
@ -77,4 +77,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
|
|||
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
|
||||
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
|
||||
|
||||
/* No inverted PFNs on 2 level page tables */
|
||||
|
||||
static inline u64 protnone_mask(u64 val)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
|
||||
{
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline bool __pte_needs_invert(u64 val)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif /* _ASM_X86_PGTABLE_2LEVEL_H */
|
||||
|
|
|
@@ -177,11 +177,44 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
#endif

/* Encode and de-code a swap entry */
#define SWP_TYPE_BITS           5

#define SWP_OFFSET_FIRST_BIT    (_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT        (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x)                   (((x).val) & 0x1f)
#define __swp_offset(x)                 ((x).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t){(type) | (offset) << 5})
#define __pte_to_swp_entry(pte)         ((swp_entry_t){ (pte).pte_high })
#define __swp_entry_to_pte(x)           ((pte_t){ { .pte_high = (x).val } })

/*
 * Normally, __swp_entry() converts from arch-independent swp_entry_t to
 * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
 * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
 * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
 * __swp_entry_to_pte() through the following helper macro based on 64bit
 * __swp_entry().
 */
#define __swp_pteval_entry(type, offset) ((pteval_t) { \
        (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
        | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })

#define __swp_entry_to_pte(x)   ((pte_t){ .pte = \
                __swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
/*
 * Analogically, __pte_to_swp_entry() doesn't just extract the arch-dependent
 * swp_entry_t, but also has to convert it from 64bit to the 32bit
 * intermediate representation, using the following macros based on 64bit
 * __swp_type() and __swp_offset().
 */
#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))

#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
                                             __pteval_swp_offset(pte)))

#include <asm/pgtable-invert.h>

#endif /* _ASM_X86_PGTABLE_3LEVEL_H */
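As a quick worked check of the constants in the 3-level encoding above, assuming the usual x86 value _PAGE_BIT_PROTNONE = 8 (the aliased Global bit; an assumption of this note, not stated in the hunk):

SWP_OFFSET_FIRST_BIT = 8 + 1 = 9
SWP_OFFSET_SHIFT     = 9 + SWP_TYPE_BITS = 9 + 5 = 14

so in __swp_pteval_entry() the inverted offset is shifted left by 14 and back right by 5, landing at bit 9 and above (clear of the present and PROT_NONE bits), while the 5 type bits occupy bits 59-63 of the 64-bit pte.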
arch/x86/include/asm/pgtable-invert.h (new file, 32 lines)

@@ -0,0 +1,32 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PGTABLE_INVERT_H
#define _ASM_PGTABLE_INVERT_H 1

#ifndef __ASSEMBLY__

static inline bool __pte_needs_invert(u64 val)
{
        return !(val & _PAGE_PRESENT);
}

/* Get a mask to xor with the page table entry to get the correct pfn. */
static inline u64 protnone_mask(u64 val)
{
        return __pte_needs_invert(val) ? ~0ull : 0;
}

static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
{
        /*
         * When a PTE transitions from NONE to !NONE or vice-versa
         * invert the PFN part to stop speculation.
         * pte_pfn undoes this when needed.
         */
        if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
                val = (val & ~mask) | (~val & mask);
        return val;
}

#endif /* __ASSEMBLY__ */

#endif
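To make the PTE inversion above concrete, here is a hedged standalone sketch (not part of the patch) that copies the three helpers into userspace. _PAGE_PRESENT and the PFN mask are hard-coded assumptions of the sketch (bit 0, 52-bit physical addresses, 4 KiB pages). It shows that once the present bit is cleared the stored entry no longer carries the real PFN, and that xoring with protnone_mask() recovers it, which is what pte_pfn() does.

#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_PRESENT 0x1ULL                 /* assumed: x86 _PAGE_PRESENT */
#define PFN_MASK     0x000ffffffffff000ULL  /* assumed: 52-bit phys, 4K pages */

static bool pte_needs_invert(uint64_t val) { return !(val & PAGE_PRESENT); }
static uint64_t protnone_mask(uint64_t val) { return pte_needs_invert(val) ? ~0ULL : 0; }

static uint64_t flip_protnone_guard(uint64_t oldval, uint64_t val, uint64_t mask)
{
        /* Same logic as the kernel helper: invert the PFN bits when the
         * present bit flips, so a speculative walk never sees a real PFN. */
        if (pte_needs_invert(oldval) != pte_needs_invert(val))
                val = (val & ~mask) | (~val & mask);
        return val;
}

int main(void)
{
        uint64_t present = (0x1234ULL << 12) | PAGE_PRESENT; /* pfn 0x1234, mapped */
        uint64_t prot_none = present & ~PAGE_PRESENT;        /* present bit cleared */
        uint64_t stored = flip_protnone_guard(present, prot_none, PFN_MASK);

        printf("present   : %#018" PRIx64 "\n", present);
        printf("stored    : %#018" PRIx64 "\n", stored);
        printf("recovered : %#018" PRIx64 "\n",
               (stored ^ protnone_mask(stored)) & PFN_MASK); /* what pte_pfn() computes */
        return 0;
}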
@ -165,19 +165,29 @@ static inline int pte_special(pte_t pte)
|
|||
return pte_flags(pte) & _PAGE_SPECIAL;
|
||||
}
|
||||
|
||||
/* Entries that were set to PROT_NONE are inverted */
|
||||
|
||||
static inline u64 protnone_mask(u64 val);
|
||||
|
||||
static inline unsigned long pte_pfn(pte_t pte)
|
||||
{
|
||||
return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
|
||||
phys_addr_t pfn = pte_val(pte);
|
||||
pfn ^= protnone_mask(pfn);
|
||||
return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static inline unsigned long pmd_pfn(pmd_t pmd)
|
||||
{
|
||||
return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
|
||||
phys_addr_t pfn = pmd_val(pmd);
|
||||
pfn ^= protnone_mask(pfn);
|
||||
return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
static inline unsigned long pud_pfn(pud_t pud)
|
||||
{
|
||||
return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
|
||||
phys_addr_t pfn = pud_val(pud);
|
||||
pfn ^= protnone_mask(pfn);
|
||||
return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
|
||||
}
|
||||
|
||||
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
|
||||
|
@ -340,11 +350,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
|
|||
return pmd_set_flags(pmd, _PAGE_RW);
|
||||
}
|
||||
|
||||
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
|
||||
{
|
||||
return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
|
||||
static inline int pte_soft_dirty(pte_t pte)
|
||||
{
|
||||
|
@ -394,19 +399,58 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
|
|||
|
||||
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
|
||||
{
|
||||
return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
|
||||
massage_pgprot(pgprot));
|
||||
phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
|
||||
pfn ^= protnone_mask(pgprot_val(pgprot));
|
||||
pfn &= PTE_PFN_MASK;
|
||||
return __pte(pfn | massage_pgprot(pgprot));
|
||||
}
|
||||
|
||||
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
|
||||
{
|
||||
return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
|
||||
massage_pgprot(pgprot));
|
||||
phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
|
||||
pfn ^= protnone_mask(pgprot_val(pgprot));
|
||||
pfn &= PHYSICAL_PMD_PAGE_MASK;
|
||||
return __pmd(pfn | massage_pgprot(pgprot));
|
||||
}
|
||||
|
||||
static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
|
||||
{
|
||||
phys_addr_t pfn = page_nr << PAGE_SHIFT;
|
||||
pfn ^= protnone_mask(pgprot_val(pgprot));
|
||||
pfn &= PHYSICAL_PUD_PAGE_MASK;
|
||||
return __pud(pfn | massage_pgprot(pgprot));
|
||||
}
|
||||
|
||||
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
|
||||
{
|
||||
return pfn_pmd(pmd_pfn(pmd),
|
||||
__pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
|
||||
}
|
||||
|
||||
static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
|
||||
{
|
||||
pudval_t v = native_pud_val(pud);
|
||||
|
||||
return __pud(v | set);
|
||||
}
|
||||
|
||||
static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
|
||||
{
|
||||
pudval_t v = native_pud_val(pud);
|
||||
|
||||
return __pud(v & ~clear);
|
||||
}
|
||||
|
||||
static inline pud_t pud_mkhuge(pud_t pud)
|
||||
{
|
||||
return pud_set_flags(pud, _PAGE_PSE);
|
||||
}
|
||||
|
||||
static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
|
||||
|
||||
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
|
||||
{
|
||||
pteval_t val = pte_val(pte);
|
||||
pteval_t val = pte_val(pte), oldval = val;
|
||||
|
||||
/*
|
||||
* Chop off the NX bit (if present), and add the NX portion of
|
||||
|
@ -414,17 +458,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
|
|||
*/
|
||||
val &= _PAGE_CHG_MASK;
|
||||
val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
|
||||
|
||||
val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
|
||||
return __pte(val);
|
||||
}
|
||||
|
||||
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
|
||||
{
|
||||
pmdval_t val = pmd_val(pmd);
|
||||
pmdval_t val = pmd_val(pmd), oldval = val;
|
||||
|
||||
val &= _HPAGE_CHG_MASK;
|
||||
val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
|
||||
|
||||
val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
|
||||
return __pmd(val);
|
||||
}
|
||||
|
||||
|
@@ -1010,6 +1054,15 @@ static inline u16 pte_flags_pkey(unsigned long pte_flags)
#endif
}

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);

static inline bool arch_has_pfn_modify_check(void)
{
        return boot_cpu_has_bug(X86_BUG_L1TF);
}

#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
@@ -166,29 +166,49 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
/*
 * Encode and de-code a swap entry
 *
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2|1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U|W|P| <- bit names
 * | OFFSET (14->63) | TYPE (9-13)  |0|X|X|X| X| X|X|X|0| <- swp entry
 * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
 * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
 * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| X|X|SD|0| <- swp entry
 *
 * G (8) is aliased and used as a PROT_NONE indicator for
 * !present ptes.  We need to start storing swap entries above
 * there.  We also need to avoid using A and D because of an
 * erratum where they can be incorrectly set by hardware on
 * non-present PTEs.
 *
 * SD (1) in swp entry is used to store soft dirty bit, which helps us
 * remember soft dirty over page migration
 *
 * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
 * but also L and G.
 *
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define SWP_TYPE_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
#define SWP_TYPE_BITS 5
/* Place the offset above the type: */
#define SWP_OFFSET_FIRST_BIT (SWP_TYPE_FIRST_BIT + SWP_TYPE_BITS)
#define SWP_TYPE_BITS           5

#define SWP_OFFSET_FIRST_BIT    (_PAGE_BIT_PROTNONE + 1)

/* We always extract/encode the offset by shifting it all the way up, and then down again */
#define SWP_OFFSET_SHIFT        (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)

#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)

#define __swp_type(x)                   (((x).val >> (SWP_TYPE_FIRST_BIT)) \
                                         & ((1U << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)                 ((x).val >> SWP_OFFSET_FIRST_BIT)
#define __swp_entry(type, offset)       ((swp_entry_t) { \
                                         ((type) << (SWP_TYPE_FIRST_BIT)) \
                                         | ((offset) << SWP_OFFSET_FIRST_BIT) })
/* Extract the high bits for type */
#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))

/* Shift up (to get rid of type), then down to get value */
#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)

/*
 * Shift the offset up "too far" by TYPE bits, then down again
 * The offset is inverted by a binary not operation to make the high
 * physical bits set.
 */
#define __swp_entry(type, offset) ((swp_entry_t) { \
        (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
        | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })

#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)           ((pte_t) { .pte = (x).val })

@@ -215,6 +235,8 @@ extern void cleanup_highmap(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);

#include <asm/pgtable-invert.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_64_H */
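A quick round-trip of the new 64-bit swap encoding, as a hedged standalone sketch (not part of the patch). The only assumption beyond the hunk above is that _PAGE_BIT_PROTNONE is the Global bit (8), which makes SWP_OFFSET_FIRST_BIT 9 and SWP_OFFSET_SHIFT 14.

#include <inttypes.h>
#include <stdio.h>

/* Assumed constant for the sketch: _PAGE_BIT_PROTNONE aliases the Global bit. */
#define PAGE_BIT_PROTNONE       8
#define SWP_TYPE_BITS           5
#define SWP_OFFSET_FIRST_BIT    (PAGE_BIT_PROTNONE + 1)
#define SWP_OFFSET_SHIFT        (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

static uint64_t swp_entry(uint64_t type, uint64_t offset)
{
        /* Same shape as the new __swp_entry(): inverted offset low, type high. */
        return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) |
               (type << (64 - SWP_TYPE_BITS));
}

int main(void)
{
        uint64_t val = swp_entry(3, 0x12345);

        printf("raw pte     : %#018" PRIx64 "\n", val);
        printf("type back   : %" PRIu64 "\n", val >> (64 - SWP_TYPE_BITS));
        printf("offset back : %#" PRIx64 "\n",
               ~val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT);
        return 0;
}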
@@ -97,15 +97,15 @@
/*
 * Tracking soft dirty bit when a page goes to a swap is tricky.
 * We need a bit which can be stored in pte _and_ not conflict
 * with swap entry format. On x86 bits 6 and 7 are *not* involved
 * into swap entry computation, but bit 6 is used for nonlinear
 * file mapping, so we borrow bit 7 for soft dirty tracking.
 * with swap entry format. On x86 bits 1-4 are *not* involved
 * into swap entry computation, but bit 7 is used for thp migration,
 * so we borrow bit 1 for soft dirty tracking.
 *
 * Please note that this bit must be treated as swap dirty page
 * mark if and only if the PTE has present bit clear!
 * mark if and only if the PTE/PMD has present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY    _PAGE_PSE
#define _PAGE_SWP_SOFT_DIRTY    _PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY    (_AT(pteval_t, 0))
#endif
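For reference, a small illustrative note on the bit numbers behind the macros above (standard x86 page-flag positions are assumed here, they are not restated in the hunk):

/* Illustrative only: why _PAGE_RW works as the swap soft-dirty home. */
#define _PAGE_BIT_RW     1  /* new home; bits 1-4 are unused by the new swap format */
#define _PAGE_BIT_PSE    7  /* old home; bit 7 must stay 0 for pmd_present()/THP migration */
#define _PAGE_BIT_GLOBAL 8  /* aliased as PROT_NONE; swap type/offset start above it */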
@@ -173,6 +173,11 @@ extern const struct seq_operations cpuinfo_op;

extern void cpu_detect(struct cpuinfo_x86 *c);

static inline unsigned long l1tf_pfn_limit(void)
{
        return BIT(boot_cpu_data.x86_phys_bits - 1 - PAGE_SHIFT) - 1;
}

extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);

@@ -855,4 +860,16 @@ bool xen_set_default_idle(void);

void stop_this_cpu(void *dummy);
void df_debug(struct pt_regs *regs, long error_code);

enum l1tf_mitigations {
        L1TF_MITIGATION_OFF,
        L1TF_MITIGATION_FLUSH_NOWARN,
        L1TF_MITIGATION_FLUSH,
        L1TF_MITIGATION_FLUSH_NOSMT,
        L1TF_MITIGATION_FULL,
        L1TF_MITIGATION_FULL_FORCE
};

extern enum l1tf_mitigations l1tf_mitigation;

#endif /* _ASM_X86_PROCESSOR_H */
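Worked numbers for l1tf_pfn_limit() above, assuming PAGE_SHIFT = 12 and a CPU reporting x86_phys_bits = 46 (a common value; both are assumptions of this example):

BIT(46 - 1 - 12) - 1 = 2^33 - 1

so the highest allowed PFN sits just below 2^33, i.e. 2^33 * 4 KiB = 2^45 bytes = 32 TiB, which is exactly MAX_PA/2 for a 46-bit physical address space. This is the limit the later "Limit swap file size to MAX_PA/2" and "more than MAX_PA/2 memory" checks are built on.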
@ -156,7 +156,6 @@ static inline int wbinvd_on_all_cpus(void)
|
|||
wbinvd();
|
||||
return 0;
|
||||
}
|
||||
#define smp_num_siblings 1
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
extern unsigned disabled_cpus;
|
||||
|
|
|
@ -129,13 +129,17 @@ static inline int topology_max_smt_threads(void)
|
|||
}
|
||||
|
||||
int topology_update_package_map(unsigned int apicid, unsigned int cpu);
|
||||
extern int topology_phys_to_logical_pkg(unsigned int pkg);
|
||||
int topology_phys_to_logical_pkg(unsigned int pkg);
|
||||
bool topology_is_primary_thread(unsigned int cpu);
|
||||
bool topology_smt_supported(void);
|
||||
#else
|
||||
#define topology_max_packages() (1)
|
||||
static inline int
|
||||
topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
|
||||
static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
|
||||
static inline int topology_max_smt_threads(void) { return 1; }
|
||||
static inline bool topology_is_primary_thread(unsigned int cpu) { return true; }
|
||||
static inline bool topology_smt_supported(void) { return false; }
|
||||
#endif
|
||||
|
||||
static inline void arch_fix_phys_package_id(int num, u32 slot)
|
||||
|
|
|
@@ -499,4 +499,15 @@ enum vm_instruction_error_number {
        VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
};

enum vmx_l1d_flush_state {
        VMENTER_L1D_FLUSH_AUTO,
        VMENTER_L1D_FLUSH_NEVER,
        VMENTER_L1D_FLUSH_COND,
        VMENTER_L1D_FLUSH_ALWAYS,
        VMENTER_L1D_FLUSH_EPT_DISABLED,
        VMENTER_L1D_FLUSH_NOT_REQUIRED,
};

extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;

#endif
@ -34,6 +34,7 @@
|
|||
#include <linux/dmi.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/irq.h>
|
||||
|
||||
#include <asm/trace/irq_vectors.h>
|
||||
#include <asm/irq_remapping.h>
|
||||
|
@ -55,6 +56,7 @@
|
|||
#include <asm/mce.h>
|
||||
#include <asm/tsc.h>
|
||||
#include <asm/hypervisor.h>
|
||||
#include <asm/irq_regs.h>
|
||||
|
||||
unsigned int num_processors;
|
||||
|
||||
|
@@ -2041,6 +2043,23 @@ static int cpuid_to_apicid[] = {
        [0 ... NR_CPUS - 1] = -1,
};

#ifdef CONFIG_SMP
/**
 * apic_id_is_primary_thread - Check whether APIC ID belongs to a primary thread
 * @id: APIC ID to check
 */
bool apic_id_is_primary_thread(unsigned int apicid)
{
        u32 mask;

        if (smp_num_siblings == 1)
                return true;
        /* Isolate the SMT bit(s) in the APICID and check for 0 */
        mask = (1U << (fls(smp_num_siblings) - 1)) - 1;
        return !(apicid & mask);
}
#endif

/*
 * Should use this API to allocate logical CPU IDs to keep nr_logical_cpuids
 * and cpuid_to_apicid[] synchronized.
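The mask trick above is easiest to see with numbers. Below is a hedged standalone sketch (not part of the patch) of the same check, with the kernel's fls() replaced by a compiler-builtin stand-in (an assumption of the sketch): for 2-way SMT the mask is 1, so even APIC IDs are the primary threads.

#include <stdbool.h>
#include <stdio.h>

static int fls_u32(unsigned int x)      /* stand-in for the kernel's fls() */
{
        return x ? 32 - __builtin_clz(x) : 0;
}

static bool is_primary_thread(unsigned int apicid, unsigned int siblings)
{
        unsigned int mask;

        if (siblings == 1)
                return true;
        /* Isolate the SMT bit(s) in the APIC ID and check for 0 */
        mask = (1U << (fls_u32(siblings) - 1)) - 1;
        return !(apicid & mask);
}

int main(void)
{
        /* 2 threads per core: APIC IDs 0, 2, 4, ... are the primary threads */
        for (unsigned int id = 0; id < 8; id++)
                printf("apicid %u -> %s\n", id,
                       is_primary_thread(id, 2) ? "primary" : "secondary");
        return 0;
}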
@ -16,6 +16,8 @@
|
|||
#include <linux/device.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/htirq.h>
|
||||
#include <linux/irq.h>
|
||||
|
||||
#include <asm/irqdomain.h>
|
||||
#include <asm/hw_irq.h>
|
||||
#include <asm/apic.h>
|
||||
|
|
|
@ -32,6 +32,7 @@
|
|||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/sched.h>
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
*/
|
||||
#include <linux/mm.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/dmar.h>
|
||||
#include <linux/hpet.h>
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/slab.h>
|
||||
|
|
|
@ -296,13 +296,34 @@ static int nearby_node(int apicid)
|
|||
}
|
||||
#endif
|
||||
|
||||
static void amd_get_topology_early(struct cpuinfo_x86 *c)
|
||||
{
|
||||
if (cpu_has(c, X86_FEATURE_TOPOEXT))
|
||||
smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fix up cpu_core_id for pre-F17h systems to be in the
|
||||
* [0 .. cores_per_node - 1] range. Not really needed but
|
||||
* kept so as not to break existing setups.
|
||||
*/
|
||||
static void legacy_fixup_core_id(struct cpuinfo_x86 *c)
|
||||
{
|
||||
u32 cus_per_node;
|
||||
|
||||
if (c->x86 >= 0x17)
|
||||
return;
|
||||
|
||||
cus_per_node = c->x86_max_cores / nodes_per_socket;
|
||||
c->cpu_core_id %= cus_per_node;
|
||||
}
|
||||
|
||||
/*
|
||||
* Fixup core topology information for
|
||||
* (1) AMD multi-node processors
|
||||
* Assumption: Number of cores in each internal node is the same.
|
||||
* (2) AMD processors supporting compute units
|
||||
*/
|
||||
#ifdef CONFIG_SMP
|
||||
static void amd_get_topology(struct cpuinfo_x86 *c)
|
||||
{
|
||||
u8 node_id;
|
||||
|
@ -315,7 +336,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
|
|||
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
|
||||
|
||||
node_id = ecx & 0xff;
|
||||
smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
|
||||
|
||||
if (c->x86 == 0x15)
|
||||
c->cu_id = ebx & 0xff;
|
||||
|
@ -353,18 +373,11 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
|
|||
} else
|
||||
return;
|
||||
|
||||
/* fixup multi-node processor information */
|
||||
if (nodes_per_socket > 1) {
|
||||
u32 cus_per_node;
|
||||
|
||||
set_cpu_cap(c, X86_FEATURE_AMD_DCM);
|
||||
cus_per_node = c->x86_max_cores / nodes_per_socket;
|
||||
|
||||
/* core id has to be in the [0 .. cores_per_node - 1] range */
|
||||
c->cpu_core_id %= cus_per_node;
|
||||
legacy_fixup_core_id(c);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* On a AMD dual core setup the lower bits of the APIC id distinguish the cores.
|
||||
|
@ -372,7 +385,6 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
|
|||
*/
|
||||
static void amd_detect_cmp(struct cpuinfo_x86 *c)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
unsigned bits;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
|
@ -384,16 +396,11 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
|
|||
/* use socket ID also for last level cache */
|
||||
per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
|
||||
amd_get_topology(c);
|
||||
#endif
|
||||
}
|
||||
|
||||
u16 amd_get_nb_id(int cpu)
|
||||
{
|
||||
u16 id = 0;
|
||||
#ifdef CONFIG_SMP
|
||||
id = per_cpu(cpu_llc_id, cpu);
|
||||
#endif
|
||||
return id;
|
||||
return per_cpu(cpu_llc_id, cpu);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(amd_get_nb_id);
|
||||
|
||||
|
@ -567,6 +574,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
|
|||
|
||||
static void early_init_amd(struct cpuinfo_x86 *c)
|
||||
{
|
||||
u64 value;
|
||||
|
||||
early_init_amd_mc(c);
|
||||
|
||||
/*
|
||||
|
@ -633,6 +642,23 @@ static void early_init_amd(struct cpuinfo_x86 *c)
|
|||
*/
|
||||
if (cpu_has_amd_erratum(c, amd_erratum_400))
|
||||
set_cpu_bug(c, X86_BUG_AMD_E400);
|
||||
|
||||
|
||||
/* Re-enable TopologyExtensions if switched off by BIOS */
|
||||
if (c->x86 == 0x15 &&
|
||||
(c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
|
||||
!cpu_has(c, X86_FEATURE_TOPOEXT)) {
|
||||
|
||||
if (msr_set_bit(0xc0011005, 54) > 0) {
|
||||
rdmsrl(0xc0011005, value);
|
||||
if (value & BIT_64(54)) {
|
||||
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
|
||||
pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
amd_get_topology_early(c);
|
||||
}
|
||||
|
||||
static void init_amd_k8(struct cpuinfo_x86 *c)
|
||||
|
@ -724,19 +750,6 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
|
|||
{
|
||||
u64 value;
|
||||
|
||||
/* re-enable TopologyExtensions if switched off by BIOS */
|
||||
if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) &&
|
||||
!cpu_has(c, X86_FEATURE_TOPOEXT)) {
|
||||
|
||||
if (msr_set_bit(0xc0011005, 54) > 0) {
|
||||
rdmsrl(0xc0011005, value);
|
||||
if (value & BIT_64(54)) {
|
||||
set_cpu_cap(c, X86_FEATURE_TOPOEXT);
|
||||
pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The way access filter has a performance penalty on some workloads.
|
||||
* Disable it on the affected CPUs.
|
||||
|
@ -799,15 +812,8 @@ static void init_amd(struct cpuinfo_x86 *c)
|
|||
|
||||
cpu_detect_cache_sizes(c);
|
||||
|
||||
/* Multi core CPU? */
|
||||
if (c->extended_cpuid_level >= 0x80000008) {
|
||||
amd_detect_cmp(c);
|
||||
srat_detect_node(c);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
detect_ht(c);
|
||||
#endif
|
||||
amd_detect_cmp(c);
|
||||
srat_detect_node(c);
|
||||
|
||||
init_amd_cacheinfo(c);
|
||||
|
||||
|
|
|
@ -21,14 +21,17 @@
|
|||
#include <asm/processor-flags.h>
|
||||
#include <asm/fpu/internal.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/vmx.h>
|
||||
#include <asm/paravirt.h>
|
||||
#include <asm/alternative.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/intel-family.h>
|
||||
#include <asm/e820.h>
|
||||
|
||||
static void __init spectre_v2_select_mitigation(void);
|
||||
static void __init ssb_select_mitigation(void);
|
||||
static void __init l1tf_select_mitigation(void);
|
||||
|
||||
/*
|
||||
* Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
|
||||
|
@ -54,6 +57,12 @@ void __init check_bugs(void)
|
|||
{
|
||||
identify_boot_cpu();
|
||||
|
||||
/*
|
||||
* identify_boot_cpu() initialized SMT support information, let the
|
||||
* core code know.
|
||||
*/
|
||||
cpu_smt_check_topology_early();
|
||||
|
||||
if (!IS_ENABLED(CONFIG_SMP)) {
|
||||
pr_info("CPU: ");
|
||||
print_cpu_info(&boot_cpu_data);
|
||||
|
@ -80,6 +89,8 @@ void __init check_bugs(void)
|
|||
*/
|
||||
ssb_select_mitigation();
|
||||
|
||||
l1tf_select_mitigation();
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/*
|
||||
* Check whether we are able to run this kernel safely on SMP.
|
||||
|
@ -310,23 +321,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
|
|||
return cmd;
|
||||
}
|
||||
|
||||
/* Check for Skylake-like CPUs (for RSB handling) */
|
||||
static bool __init is_skylake_era(void)
|
||||
{
|
||||
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
|
||||
boot_cpu_data.x86 == 6) {
|
||||
switch (boot_cpu_data.x86_model) {
|
||||
case INTEL_FAM6_SKYLAKE_MOBILE:
|
||||
case INTEL_FAM6_SKYLAKE_DESKTOP:
|
||||
case INTEL_FAM6_SKYLAKE_X:
|
||||
case INTEL_FAM6_KABYLAKE_MOBILE:
|
||||
case INTEL_FAM6_KABYLAKE_DESKTOP:
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static void __init spectre_v2_select_mitigation(void)
|
||||
{
|
||||
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
|
||||
|
@ -387,22 +381,15 @@ retpoline_auto:
|
|||
pr_info("%s\n", spectre_v2_strings[mode]);
|
||||
|
||||
/*
|
||||
* If neither SMEP nor PTI are available, there is a risk of
|
||||
* hitting userspace addresses in the RSB after a context switch
|
||||
* from a shallow call stack to a deeper one. To prevent this fill
|
||||
* the entire RSB, even when using IBRS.
|
||||
* If spectre v2 protection has been enabled, unconditionally fill
|
||||
* RSB during a context switch; this protects against two independent
|
||||
* issues:
|
||||
*
|
||||
* Skylake era CPUs have a separate issue with *underflow* of the
|
||||
* RSB, when they will predict 'ret' targets from the generic BTB.
|
||||
* The proper mitigation for this is IBRS. If IBRS is not supported
|
||||
* or deactivated in favour of retpolines the RSB fill on context
|
||||
* switch is required.
|
||||
* - RSB underflow (and switch to BTB) on Skylake+
|
||||
* - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
|
||||
*/
|
||||
if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
|
||||
!boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
|
||||
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
|
||||
pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
|
||||
}
|
||||
setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
|
||||
pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
|
||||
|
||||
/* Initialize Indirect Branch Prediction Barrier if supported */
|
||||
if (boot_cpu_has(X86_FEATURE_IBPB)) {
|
||||
|
@@ -653,8 +640,121 @@ void x86_spec_ctrl_setup_ap(void)
                x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)     "L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);

enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
#endif

static void __init l1tf_select_mitigation(void)
{
        u64 half_pa;

        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return;

        switch (l1tf_mitigation) {
        case L1TF_MITIGATION_OFF:
        case L1TF_MITIGATION_FLUSH_NOWARN:
        case L1TF_MITIGATION_FLUSH:
                break;
        case L1TF_MITIGATION_FLUSH_NOSMT:
        case L1TF_MITIGATION_FULL:
                cpu_smt_disable(false);
                break;
        case L1TF_MITIGATION_FULL_FORCE:
                cpu_smt_disable(true);
                break;
        }

#if CONFIG_PGTABLE_LEVELS == 2
        pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
        return;
#endif

        /*
         * This is extremely unlikely to happen because almost all
         * systems have far more MAX_PA/2 than RAM can be fit into
         * DIMM slots.
         */
        half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
        if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
                pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
                return;
        }

        setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

static int __init l1tf_cmdline(char *str)
{
        if (!boot_cpu_has_bug(X86_BUG_L1TF))
                return 0;

        if (!str)
                return -EINVAL;

        if (!strcmp(str, "off"))
                l1tf_mitigation = L1TF_MITIGATION_OFF;
        else if (!strcmp(str, "flush,nowarn"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
        else if (!strcmp(str, "flush"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH;
        else if (!strcmp(str, "flush,nosmt"))
                l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
        else if (!strcmp(str, "full"))
                l1tf_mitigation = L1TF_MITIGATION_FULL;
        else if (!strcmp(str, "full,force"))
                l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

        return 0;
}
early_param("l1tf", l1tf_cmdline);
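As a worked example of the handler above: booting with l1tf=flush,nosmt selects L1TF_MITIGATION_FLUSH_NOSMT, which l1tf_select_mitigation() turns into cpu_smt_disable(false); l1tf=full,force maps to L1TF_MITIGATION_FULL_FORCE and cpu_smt_disable(true); l1tf=off, l1tf=flush,nowarn and l1tf=flush leave SMT untouched; and with no parameter at all the default stays at L1TF_MITIGATION_FLUSH.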
#undef pr_fmt
|
||||
|
||||
#ifdef CONFIG_SYSFS
|
||||
|
||||
#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
|
||||
|
||||
#if IS_ENABLED(CONFIG_KVM_INTEL)
|
||||
static const char *l1tf_vmx_states[] = {
|
||||
[VMENTER_L1D_FLUSH_AUTO] = "auto",
|
||||
[VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
|
||||
[VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
|
||||
[VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
|
||||
[VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
|
||||
[VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary"
|
||||
};
|
||||
|
||||
static ssize_t l1tf_show_state(char *buf)
|
||||
{
|
||||
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
|
||||
return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
|
||||
|
||||
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
|
||||
(l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
|
||||
cpu_smt_control == CPU_SMT_ENABLED))
|
||||
return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
|
||||
l1tf_vmx_states[l1tf_vmx_mitigation]);
|
||||
|
||||
return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
|
||||
l1tf_vmx_states[l1tf_vmx_mitigation],
|
||||
cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
|
||||
}
|
||||
#else
|
||||
static ssize_t l1tf_show_state(char *buf)
|
||||
{
|
||||
return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
|
||||
}
|
||||
#endif
|
||||
|
||||
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
|
||||
char *buf, unsigned int bug)
|
||||
{
|
||||
|
@ -680,6 +780,10 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
|
|||
case X86_BUG_SPEC_STORE_BYPASS:
|
||||
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
|
||||
|
||||
case X86_BUG_L1TF:
|
||||
if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
|
||||
return l1tf_show_state(buf);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -706,4 +810,9 @@ ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *
|
|||
{
|
||||
return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
|
||||
}
|
||||
|
||||
ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -61,6 +61,13 @@ cpumask_var_t cpu_callin_mask;
|
|||
/* representing cpus for which sibling maps can be computed */
|
||||
cpumask_var_t cpu_sibling_setup_mask;
|
||||
|
||||
/* Number of siblings per CPU package */
|
||||
int smp_num_siblings = 1;
|
||||
EXPORT_SYMBOL(smp_num_siblings);
|
||||
|
||||
/* Last level cache ID of each logical CPU */
|
||||
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
|
||||
|
||||
/* correctly size the local cpu masks */
|
||||
void __init setup_cpu_local_masks(void)
|
||||
{
|
||||
|
@ -606,33 +613,36 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
|
|||
tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
|
||||
}
|
||||
|
||||
void detect_ht(struct cpuinfo_x86 *c)
|
||||
int detect_ht_early(struct cpuinfo_x86 *c)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
u32 eax, ebx, ecx, edx;
|
||||
int index_msb, core_bits;
|
||||
static bool printed;
|
||||
|
||||
if (!cpu_has(c, X86_FEATURE_HT))
|
||||
return;
|
||||
return -1;
|
||||
|
||||
if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
|
||||
goto out;
|
||||
return -1;
|
||||
|
||||
if (cpu_has(c, X86_FEATURE_XTOPOLOGY))
|
||||
return;
|
||||
return -1;
|
||||
|
||||
cpuid(1, &eax, &ebx, &ecx, &edx);
|
||||
|
||||
smp_num_siblings = (ebx & 0xff0000) >> 16;
|
||||
|
||||
if (smp_num_siblings == 1) {
|
||||
if (smp_num_siblings == 1)
|
||||
pr_info_once("CPU0: Hyper-Threading is disabled\n");
|
||||
goto out;
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (smp_num_siblings <= 1)
|
||||
goto out;
|
||||
void detect_ht(struct cpuinfo_x86 *c)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
int index_msb, core_bits;
|
||||
|
||||
if (detect_ht_early(c) < 0)
|
||||
return;
|
||||
|
||||
index_msb = get_count_order(smp_num_siblings);
|
||||
c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
|
||||
|
@ -645,15 +655,6 @@ void detect_ht(struct cpuinfo_x86 *c)
|
|||
|
||||
c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
|
||||
((1 << core_bits) - 1);
|
||||
|
||||
out:
|
||||
if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
|
||||
pr_info("CPU: Physical Processor ID: %d\n",
|
||||
c->phys_proc_id);
|
||||
pr_info("CPU: Processor Core ID: %d\n",
|
||||
c->cpu_core_id);
|
||||
printed = 1;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -925,6 +926,21 @@ static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
|
|||
{}
|
||||
};
|
||||
|
||||
static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
|
||||
/* in addition to cpu_no_speculation */
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
|
||||
{ X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
|
||||
{}
|
||||
};
|
||||
|
||||
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
|
||||
{
|
||||
u64 ia32_cap = 0;
|
||||
|
@ -950,6 +966,11 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
|
|||
return;
|
||||
|
||||
setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
|
||||
|
||||
if (x86_match_cpu(cpu_no_l1tf))
|
||||
return;
|
||||
|
||||
setup_force_cpu_bug(X86_BUG_L1TF);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -46,6 +46,8 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
|
|||
|
||||
extern void get_cpu_cap(struct cpuinfo_x86 *c);
|
||||
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
|
||||
extern int detect_extended_topology_early(struct cpuinfo_x86 *c);
|
||||
extern int detect_ht_early(struct cpuinfo_x86 *c);
|
||||
|
||||
extern void x86_spec_ctrl_setup_ap(void);
|
||||
|
||||
|
|
|
@ -283,6 +283,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
|
|||
}
|
||||
|
||||
check_mpx_erratum(c);
|
||||
|
||||
/*
|
||||
* Get the number of SMT siblings early from the extended topology
|
||||
* leaf, if available. Otherwise try the legacy SMT detection.
|
||||
*/
|
||||
if (detect_extended_topology_early(c) < 0)
|
||||
detect_ht_early(c);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
|
|
|
@ -384,6 +384,24 @@ static void __exit microcode_dev_exit(void)
|
|||
/* fake device for request_firmware */
|
||||
static struct platform_device *microcode_pdev;
|
||||
|
||||
static int check_online_cpus(void)
|
||||
{
|
||||
unsigned int cpu;
|
||||
|
||||
/*
|
||||
* Make sure all CPUs are online. It's fine for SMT to be disabled if
|
||||
* all the primary threads are still online.
|
||||
*/
|
||||
for_each_present_cpu(cpu) {
|
||||
if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) {
|
||||
pr_err("Not all CPUs online, aborting microcode update.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int reload_for_cpu(int cpu)
|
||||
{
|
||||
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
|
||||
|
@ -418,7 +436,13 @@ static ssize_t reload_store(struct device *dev,
|
|||
return size;
|
||||
|
||||
get_online_cpus();
|
||||
|
||||
ret = check_online_cpus();
|
||||
if (ret)
|
||||
goto put;
|
||||
|
||||
mutex_lock(µcode_mutex);
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
tmp_ret = reload_for_cpu(cpu);
|
||||
if (tmp_ret != 0)
|
||||
|
@ -431,6 +455,8 @@ static ssize_t reload_store(struct device *dev,
|
|||
if (!ret)
|
||||
perf_check_microcode();
|
||||
mutex_unlock(µcode_mutex);
|
||||
|
||||
put:
|
||||
put_online_cpus();
|
||||
|
||||
if (!ret)
|
||||
|
|
|
@ -26,16 +26,13 @@
|
|||
* exists, use it for populating initial_apicid and cpu topology
|
||||
* detection.
|
||||
*/
|
||||
void detect_extended_topology(struct cpuinfo_x86 *c)
|
||||
int detect_extended_topology_early(struct cpuinfo_x86 *c)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
unsigned int eax, ebx, ecx, edx, sub_index;
|
||||
unsigned int ht_mask_width, core_plus_mask_width;
|
||||
unsigned int core_select_mask, core_level_siblings;
|
||||
static bool printed;
|
||||
unsigned int eax, ebx, ecx, edx;
|
||||
|
||||
if (c->cpuid_level < 0xb)
|
||||
return;
|
||||
return -1;
|
||||
|
||||
cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
|
||||
|
||||
|
@ -43,7 +40,7 @@ void detect_extended_topology(struct cpuinfo_x86 *c)
|
|||
* check if the cpuid leaf 0xb is actually implemented.
|
||||
*/
|
||||
if (ebx == 0 || (LEAFB_SUBTYPE(ecx) != SMT_TYPE))
|
||||
return;
|
||||
return -1;
|
||||
|
||||
set_cpu_cap(c, X86_FEATURE_XTOPOLOGY);
|
||||
|
||||
|
@ -51,10 +48,30 @@ void detect_extended_topology(struct cpuinfo_x86 *c)
|
|||
* initial apic id, which also represents 32-bit extended x2apic id.
|
||||
*/
|
||||
c->initial_apicid = edx;
|
||||
smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for extended topology enumeration cpuid leaf 0xb and if it
|
||||
* exists, use it for populating initial_apicid and cpu topology
|
||||
* detection.
|
||||
*/
|
||||
void detect_extended_topology(struct cpuinfo_x86 *c)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
unsigned int eax, ebx, ecx, edx, sub_index;
|
||||
unsigned int ht_mask_width, core_plus_mask_width;
|
||||
unsigned int core_select_mask, core_level_siblings;
|
||||
|
||||
if (detect_extended_topology_early(c) < 0)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Populate HT related information from sub-leaf level 0.
|
||||
*/
|
||||
cpuid_count(0xb, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
|
||||
core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
|
||||
core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
|
||||
|
||||
|
@ -85,15 +102,5 @@ void detect_extended_topology(struct cpuinfo_x86 *c)
|
|||
c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
|
||||
|
||||
c->x86_max_cores = (core_level_siblings / smp_num_siblings);
|
||||
|
||||
if (!printed) {
|
||||
pr_info("CPU: Physical Processor ID: %d\n",
|
||||
c->phys_proc_id);
|
||||
if (c->x86_max_cores > 1)
|
||||
pr_info("CPU: Processor Core ID: %d\n",
|
||||
c->cpu_core_id);
|
||||
printed = 1;
|
||||
}
|
||||
return;
|
||||
#endif
|
||||
}
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <asm/fpu/signal.h>
|
||||
#include <asm/fpu/types.h>
|
||||
#include <asm/traps.h>
|
||||
#include <asm/irq_regs.h>
|
||||
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/pkeys.h>
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/kprobes.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/ftrace.h>
|
||||
#include <asm/nops.h>
|
||||
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
#include <linux/clocksource.h>
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/errno.h>
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
#include <linux/sched.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/timex.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/init.h>
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/ftrace.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/irq.h>
|
||||
|
||||
#include <asm/apic.h>
|
||||
#include <asm/io_apic.h>
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/cpu.h>
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/ftrace.h>
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
#include <linux/sched.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/timex.h>
|
||||
#include <linux/random.h>
|
||||
#include <linux/kprobes.h>
|
||||
|
|
|
@ -61,6 +61,7 @@
|
|||
#include <asm/alternative.h>
|
||||
#include <asm/insn.h>
|
||||
#include <asm/debugreg.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
#include "common.h"
|
||||
|
||||
|
@ -396,7 +397,6 @@ int __copy_instruction(u8 *dest, u8 *src)
|
|||
newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
|
||||
if ((s64) (s32) newdisp != newdisp) {
|
||||
pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
|
||||
pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
|
||||
return 0;
|
||||
}
|
||||
disp = (u8 *) dest + insn_offset_displacement(&insn);
|
||||
|
@ -612,8 +612,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
|
|||
* Raise a BUG or we'll continue in an endless reentering loop
|
||||
* and eventually a stack overflow.
|
||||
*/
|
||||
printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
|
||||
p->addr);
|
||||
pr_err("Unrecoverable kprobe detected.\n");
|
||||
dump_kprobe(p);
|
||||
BUG();
|
||||
default:
|
||||
|
|
|
@ -39,6 +39,7 @@
|
|||
#include <asm/insn.h>
|
||||
#include <asm/debugreg.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
#include "common.h"
|
||||
|
||||
|
|
|
@ -88,10 +88,12 @@ unsigned paravirt_patch_call(void *insnbuf,
|
|||
struct branch *b = insnbuf;
|
||||
unsigned long delta = (unsigned long)target - (addr+5);
|
||||
|
||||
if (tgt_clobbers & ~site_clobbers)
|
||||
return len; /* target would clobber too much for this site */
|
||||
if (len < 5)
|
||||
if (len < 5) {
|
||||
#ifdef CONFIG_RETPOLINE
|
||||
WARN_ONCE("Failing to patch indirect CALL in %ps\n", (void *)addr);
|
||||
#endif
|
||||
return len; /* call too long for patch site */
|
||||
}
|
||||
|
||||
b->opcode = 0xe8; /* call */
|
||||
b->delta = delta;
|
||||
|
@ -106,8 +108,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
|
|||
struct branch *b = insnbuf;
|
||||
unsigned long delta = (unsigned long)target - (addr+5);
|
||||
|
||||
if (len < 5)
|
||||
if (len < 5) {
|
||||
#ifdef CONFIG_RETPOLINE
|
||||
WARN_ONCE("Failing to patch indirect JMP in %ps\n", (void *)addr);
|
||||
#endif
|
||||
return len; /* call too long for patch site */
|
||||
}
|
||||
|
||||
b->opcode = 0xe9; /* jmp */
|
||||
b->delta = delta;
|
||||
|
|
|
@@ -854,6 +854,12 @@ void __init setup_arch(char **cmdline_p)
        memblock_reserve(__pa_symbol(_text),
                         (unsigned long)__bss_stop - (unsigned long)_text);

        /*
         * Make sure page 0 is always reserved because on systems with
         * L1TF its contents can be leaked to user processes.
         */
        memblock_reserve(0, PAGE_SIZE);

        early_reserve_initrd();

        /*
@ -271,6 +271,7 @@ __visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
|
|||
/*
|
||||
* KVM uses this interrupt to force a cpu out of guest mode
|
||||
*/
|
||||
kvm_set_cpu_l1tf_flush_l1d();
|
||||
}
|
||||
|
||||
__visible void __irq_entry smp_trace_reschedule_interrupt(struct pt_regs *regs)
|
||||
|
|
|
@ -76,13 +76,7 @@
|
|||
#include <asm/realmode.h>
|
||||
#include <asm/misc.h>
|
||||
#include <asm/spec-ctrl.h>
|
||||
|
||||
/* Number of siblings per CPU package */
|
||||
int smp_num_siblings = 1;
|
||||
EXPORT_SYMBOL(smp_num_siblings);
|
||||
|
||||
/* Last level cache ID of each logical CPU */
|
||||
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;
|
||||
#include <asm/hw_irq.h>
|
||||
|
||||
/* representing HT siblings of each logical CPU */
|
||||
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
|
||||
|
@ -295,6 +289,23 @@ found:
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* topology_is_primary_thread - Check whether CPU is the primary SMT thread
|
||||
* @cpu: CPU to check
|
||||
*/
|
||||
bool topology_is_primary_thread(unsigned int cpu)
|
||||
{
|
||||
return apic_id_is_primary_thread(per_cpu(x86_cpu_to_apicid, cpu));
|
||||
}
|
||||
|
||||
/**
|
||||
* topology_smt_supported - Check whether SMT is supported by the CPUs
|
||||
*/
|
||||
bool topology_smt_supported(void)
|
||||
{
|
||||
return smp_num_siblings > 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* topology_phys_to_logical_pkg - Map a physical package id to a logical
|
||||
*
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/i8253.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/export.h>
|
||||
|
|
|
@ -175,6 +175,8 @@ struct vcpu_svm {
|
|||
uint64_t sysenter_eip;
|
||||
uint64_t tsc_aux;
|
||||
|
||||
u64 msr_decfg;
|
||||
|
||||
u64 next_rip;
|
||||
|
||||
u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
|
||||
|
@ -1567,6 +1569,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
|
|||
u32 dummy;
|
||||
u32 eax = 1;
|
||||
|
||||
vcpu->arch.microcode_version = 0x01000065;
|
||||
svm->spec_ctrl = 0;
|
||||
svm->virt_spec_ctrl = 0;
|
||||
|
||||
|
@ -2124,6 +2127,8 @@ static int pf_interception(struct vcpu_svm *svm)
|
|||
u32 error_code;
|
||||
int r = 1;
|
||||
|
||||
svm->vcpu.arch.l1tf_flush_l1d = true;
|
||||
|
||||
switch (svm->apf_reason) {
|
||||
default:
|
||||
error_code = svm->vmcb->control.exit_info_1;
|
||||
|
@ -3483,6 +3488,22 @@ static int cr8_write_interception(struct vcpu_svm *svm)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int svm_get_msr_feature(struct kvm_msr_entry *msr)
|
||||
{
|
||||
msr->data = 0;
|
||||
|
||||
switch (msr->index) {
|
||||
case MSR_F10H_DECFG:
|
||||
if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
|
||||
msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
|
||||
break;
|
||||
default:
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
||||
{
|
||||
struct vcpu_svm *svm = to_svm(vcpu);
|
||||
|
@ -3565,9 +3586,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
|
||||
msr_info->data = svm->virt_spec_ctrl;
|
||||
break;
|
||||
case MSR_IA32_UCODE_REV:
|
||||
msr_info->data = 0x01000065;
|
||||
break;
|
||||
case MSR_F15H_IC_CFG: {
|
||||
|
||||
int family, model;
|
||||
|
@ -3585,6 +3603,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
msr_info->data = 0x1E;
|
||||
}
|
||||
break;
|
||||
case MSR_F10H_DECFG:
|
||||
msr_info->data = svm->msr_decfg;
|
||||
break;
|
||||
default:
|
||||
return kvm_get_msr_common(vcpu, msr_info);
|
||||
}
|
||||
|
@ -3773,6 +3794,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
|
|||
case MSR_VM_IGNNE:
|
||||
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
|
||||
break;
|
||||
case MSR_F10H_DECFG: {
|
||||
struct kvm_msr_entry msr_entry;
|
||||
|
||||
msr_entry.index = msr->index;
|
||||
if (svm_get_msr_feature(&msr_entry))
|
||||
return 1;
|
||||
|
||||
/* Check the supported bits */
|
||||
if (data & ~msr_entry.data)
|
||||
return 1;
|
||||
|
||||
/* Don't allow the guest to change a bit, #GP */
|
||||
if (!msr->host_initiated && (data ^ msr_entry.data))
|
||||
return 1;
|
||||
|
||||
svm->msr_decfg = data;
|
||||
break;
|
||||
}
|
||||
case MSR_IA32_APICBASE:
|
||||
if (kvm_vcpu_apicv_active(vcpu))
|
||||
avic_update_vapic_bar(to_svm(vcpu), data);
|
||||
|
@ -5502,6 +5541,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
|
|||
.vcpu_unblocking = svm_vcpu_unblocking,
|
||||
|
||||
.update_bp_intercept = update_bp_intercept,
|
||||
.get_msr_feature = svm_get_msr_feature,
|
||||
.get_msr = svm_get_msr,
|
||||
.set_msr = svm_set_msr,
|
||||
.get_segment_base = svm_get_segment_base,
|
||||
|
|
|
@ -189,6 +189,150 @@ module_param(ple_window_max, int, S_IRUGO);
|
|||
|
||||
extern const ulong vmx_return;
|
||||
|
||||
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
|
||||
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
|
||||
static DEFINE_MUTEX(vmx_l1d_flush_mutex);
|
||||
|
||||
/* Storage for pre module init parameter parsing */
|
||||
static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
|
||||
|
||||
static const struct {
|
||||
const char *option;
|
||||
enum vmx_l1d_flush_state cmd;
|
||||
} vmentry_l1d_param[] = {
|
||||
{"auto", VMENTER_L1D_FLUSH_AUTO},
|
||||
{"never", VMENTER_L1D_FLUSH_NEVER},
|
||||
{"cond", VMENTER_L1D_FLUSH_COND},
|
||||
{"always", VMENTER_L1D_FLUSH_ALWAYS},
|
||||
};
|
||||
|
||||
#define L1D_CACHE_ORDER 4
|
||||
static void *vmx_l1d_flush_pages;
|
||||
|
||||
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
|
||||
{
|
||||
struct page *page;
|
||||
unsigned int i;
|
||||
|
||||
if (!enable_ept) {
|
||||
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
|
||||
u64 msr;
|
||||
|
||||
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
|
||||
if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
|
||||
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* If set to auto use the default l1tf mitigation method */
|
||||
if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
|
||||
switch (l1tf_mitigation) {
|
||||
case L1TF_MITIGATION_OFF:
|
||||
l1tf = VMENTER_L1D_FLUSH_NEVER;
|
||||
break;
|
||||
case L1TF_MITIGATION_FLUSH_NOWARN:
|
||||
case L1TF_MITIGATION_FLUSH:
|
||||
case L1TF_MITIGATION_FLUSH_NOSMT:
|
||||
l1tf = VMENTER_L1D_FLUSH_COND;
|
||||
break;
|
||||
case L1TF_MITIGATION_FULL:
|
||||
case L1TF_MITIGATION_FULL_FORCE:
|
||||
l1tf = VMENTER_L1D_FLUSH_ALWAYS;
|
||||
break;
|
||||
}
|
||||
} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
|
||||
l1tf = VMENTER_L1D_FLUSH_ALWAYS;
|
||||
}
|
||||
|
||||
if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
|
||||
!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
|
||||
page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
|
||||
if (!page)
|
||||
return -ENOMEM;
|
||||
vmx_l1d_flush_pages = page_address(page);
|
||||
|
||||
/*
|
||||
* Initialize each page with a different pattern in
|
||||
* order to protect against KSM in the nested
|
||||
* virtualization case.
|
||||
*/
|
||||
for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
|
||||
memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
|
||||
PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
|
||||
l1tf_vmx_mitigation = l1tf;
|
||||
|
||||
if (l1tf != VMENTER_L1D_FLUSH_NEVER)
|
||||
static_branch_enable(&vmx_l1d_should_flush);
|
||||
else
|
||||
static_branch_disable(&vmx_l1d_should_flush);
|
||||
|
||||
if (l1tf == VMENTER_L1D_FLUSH_COND)
|
||||
static_branch_enable(&vmx_l1d_flush_cond);
|
||||
else
|
||||
static_branch_disable(&vmx_l1d_flush_cond);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vmentry_l1d_flush_parse(const char *s)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
if (s) {
|
||||
for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
|
||||
if (sysfs_streq(s, vmentry_l1d_param[i].option))
|
||||
return vmentry_l1d_param[i].cmd;
|
||||
}
|
||||
}
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
|
||||
{
|
||||
int l1tf, ret;
|
||||
|
||||
if (!boot_cpu_has(X86_BUG_L1TF))
|
||||
return 0;
|
||||
|
||||
l1tf = vmentry_l1d_flush_parse(s);
|
||||
if (l1tf < 0)
|
||||
return l1tf;
|
||||
|
||||
/*
|
||||
* Has vmx_init() run already? If not then this is the pre init
|
||||
* parameter parsing. In that case just store the value and let
|
||||
* vmx_init() do the proper setup after enable_ept has been
|
||||
* established.
|
||||
*/
|
||||
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
|
||||
vmentry_l1d_flush_param = l1tf;
|
||||
return 0;
|
||||
}
|
||||
|
||||
mutex_lock(&vmx_l1d_flush_mutex);
|
||||
ret = vmx_setup_l1d_flush(l1tf);
|
||||
mutex_unlock(&vmx_l1d_flush_mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
|
||||
{
|
||||
return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
|
||||
}
|
||||
|
||||
static const struct kernel_param_ops vmentry_l1d_flush_ops = {
|
||||
.set = vmentry_l1d_flush_set,
|
||||
.get = vmentry_l1d_flush_get,
|
||||
};
|
||||
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);
|
||||
|
||||
#define NR_AUTOLOAD_MSRS 8
|
||||
|
||||
struct vmcs {
|
||||
|
@ -541,6 +685,11 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
|
|||
(unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
struct vmx_msrs {
|
||||
unsigned int nr;
|
||||
struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
|
||||
};
|
||||
|
||||
struct vcpu_vmx {
|
||||
struct kvm_vcpu vcpu;
|
||||
unsigned long host_rsp;
|
||||
|
@ -573,9 +722,8 @@ struct vcpu_vmx {
|
|||
struct loaded_vmcs *loaded_vmcs;
|
||||
bool __launched; /* temporary, used in vmx_vcpu_run */
|
||||
struct msr_autoload {
|
||||
unsigned nr;
|
||||
struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
|
||||
struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
|
||||
struct vmx_msrs guest;
|
||||
struct vmx_msrs host;
|
||||
} msr_autoload;
|
||||
struct {
|
||||
int loaded;
|
||||
|
@ -1920,9 +2068,20 @@ static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
|
|||
vm_exit_controls_clearbit(vmx, exit);
|
||||
}
|
||||
|
||||
static int find_msr(struct vmx_msrs *m, unsigned int msr)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < m->nr; ++i) {
|
||||
if (m->val[i].index == msr)
|
||||
return i;
|
||||
}
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
|
||||
{
|
||||
unsigned i;
|
||||
int i;
|
||||
struct msr_autoload *m = &vmx->msr_autoload;
|
||||
|
||||
switch (msr) {
|
||||
|
@ -1943,18 +2102,21 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
|
|||
}
|
||||
break;
|
||||
}
|
||||
i = find_msr(&m->guest, msr);
|
||||
if (i < 0)
|
||||
goto skip_guest;
|
||||
--m->guest.nr;
|
||||
m->guest.val[i] = m->guest.val[m->guest.nr];
|
||||
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
|
||||
|
||||
for (i = 0; i < m->nr; ++i)
|
||||
if (m->guest[i].index == msr)
|
||||
break;
|
||||
|
||||
if (i == m->nr)
|
||||
skip_guest:
|
||||
i = find_msr(&m->host, msr);
|
||||
if (i < 0)
|
||||
return;
|
||||
--m->nr;
|
||||
m->guest[i] = m->guest[m->nr];
|
||||
m->host[i] = m->host[m->nr];
|
||||
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
|
||||
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
|
||||
|
||||
--m->host.nr;
|
||||
m->host.val[i] = m->host.val[m->host.nr];
|
||||
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
|
||||
}
|
||||
|
||||
static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
|
||||
|
@ -1969,9 +2131,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
|
|||
}
|
||||
|
||||
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
|
||||
u64 guest_val, u64 host_val)
|
||||
u64 guest_val, u64 host_val, bool entry_only)
|
||||
{
|
||||
unsigned i;
|
||||
int i, j = 0;
|
||||
struct msr_autoload *m = &vmx->msr_autoload;
|
||||
|
||||
switch (msr) {
|
||||
|
@ -2006,24 +2168,31 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
|
|||
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
|
||||
}
|
||||
|
||||
for (i = 0; i < m->nr; ++i)
|
||||
if (m->guest[i].index == msr)
|
||||
break;
|
||||
i = find_msr(&m->guest, msr);
|
||||
if (!entry_only)
|
||||
j = find_msr(&m->host, msr);
|
||||
|
||||
if (i == NR_AUTOLOAD_MSRS) {
|
||||
if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
|
||||
printk_once(KERN_WARNING "Not enough msr switch entries. "
|
||||
"Can't add msr %x\n", msr);
|
||||
return;
|
||||
} else if (i == m->nr) {
|
||||
++m->nr;
|
||||
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
|
||||
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
|
||||
}
|
||||
if (i < 0) {
|
||||
i = m->guest.nr++;
|
||||
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
|
||||
}
|
||||
m->guest.val[i].index = msr;
|
||||
m->guest.val[i].value = guest_val;
|
||||
|
||||
m->guest[i].index = msr;
|
||||
m->guest[i].value = guest_val;
|
||||
m->host[i].index = msr;
|
||||
m->host[i].value = host_val;
|
||||
if (entry_only)
|
||||
return;
|
||||
|
||||
if (j < 0) {
|
||||
j = m->host.nr++;
|
||||
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
|
||||
}
|
||||
m->host.val[j].index = msr;
|
||||
m->host.val[j].value = host_val;
|
||||
}
|
||||
|
||||
static void reload_tss(void)
|
||||
|
@ -2080,7 +2249,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
|
|||
guest_efer &= ~EFER_LME;
|
||||
if (guest_efer != host_efer)
|
||||
add_atomic_switch_msr(vmx, MSR_EFER,
|
||||
guest_efer, host_efer);
|
||||
guest_efer, host_efer, false);
|
||||
return false;
|
||||
} else {
|
||||
guest_efer &= ~ignore_bits;
|
||||
|
@ -2994,6 +3163,11 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu,
|
|||
return !(val & ~valid_bits);
|
||||
}
|
||||
|
||||
static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Reads an msr value (of 'msr_index') into 'pdata'.
|
||||
* Returns 0 on success, non-0 otherwise.
|
||||
|
@ -3244,7 +3418,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
vcpu->arch.ia32_xss = data;
|
||||
if (vcpu->arch.ia32_xss != host_xss)
|
||||
add_atomic_switch_msr(vmx, MSR_IA32_XSS,
|
||||
vcpu->arch.ia32_xss, host_xss);
|
||||
vcpu->arch.ia32_xss, host_xss, false);
|
||||
else
|
||||
clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
|
||||
break;
|
||||
|
@ -5265,9 +5439,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
|
|||
|
||||
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
|
||||
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
|
||||
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
|
||||
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
|
||||
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
|
||||
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
|
||||
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
|
||||
|
||||
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
|
||||
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
|
||||
|
@ -5287,8 +5461,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
|
|||
++vmx->nmsrs;
|
||||
}
|
||||
|
||||
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
|
||||
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities);
|
||||
vmx->arch_capabilities = kvm_get_arch_capabilities();
|
||||
|
||||
vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
|
||||
|
||||
|
@ -5317,6 +5490,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
|
|||
u64 cr0;
|
||||
|
||||
vmx->rmode.vm86_active = 0;
|
||||
vcpu->arch.microcode_version = 0x100000000ULL;
|
||||
vmx->spec_ctrl = 0;
|
||||
|
||||
vmx->soft_vnmi_blocked = 0;
|
||||
|
@ -5722,6 +5896,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
|
|||
BUG_ON(enable_ept);
|
||||
cr2 = vmcs_readl(EXIT_QUALIFICATION);
|
||||
trace_kvm_page_fault(cr2, error_code);
|
||||
vcpu->arch.l1tf_flush_l1d = true;
|
||||
|
||||
if (kvm_event_needs_reinjection(vcpu))
|
||||
kvm_mmu_unprotect_page_virt(vcpu, cr2);
|
||||
|
@ -8485,6 +8660,79 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
|
|||
}
|
||||
}
|
||||
|
||||
/*
 * Software based L1D cache flush which is used when microcode providing
 * the cache control MSR is not loaded.
 *
 * The L1D cache is 32 KiB on Nehalem and later microarchitectures, but to
 * flush it 64 KiB must be read in, because the replacement algorithm is
 * not exactly LRU. This could be sized at runtime via topology information,
 * but as all relevant affected CPUs have a 32 KiB L1D cache there is no
 * point in doing so.
 */
#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;

static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int size = PAGE_SIZE << L1D_CACHE_ORDER;
|
||||
|
||||
	/*
	 * This code is only executed when the flush mode is 'cond' or
	 * 'always'.
	 */
|
||||
if (static_branch_likely(&vmx_l1d_flush_cond)) {
|
||||
bool flush_l1d;
|
||||
|
||||
/*
|
||||
* Clear the per-vcpu flush bit, it gets set again
|
||||
* either from vcpu_run() or from one of the unsafe
|
||||
* VMEXIT handlers.
|
||||
*/
|
||||
flush_l1d = vcpu->arch.l1tf_flush_l1d;
|
||||
vcpu->arch.l1tf_flush_l1d = false;
|
||||
|
||||
/*
|
||||
* Clear the per-cpu flush bit, it gets set again from
|
||||
* the interrupt handlers.
|
||||
*/
|
||||
flush_l1d |= kvm_get_cpu_l1tf_flush_l1d();
|
||||
kvm_clear_cpu_l1tf_flush_l1d();
|
||||
|
||||
if (!flush_l1d)
|
||||
return;
|
||||
}
|
||||
|
||||
vcpu->stat.l1d_flush++;
|
||||
|
||||
if (static_cpu_has(X86_FEATURE_FLUSH_L1D)) {
|
||||
wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
|
||||
return;
|
||||
}
|
||||
|
||||
	asm volatile(
		/* First ensure the pages are in the TLB */
		"xorl %%eax, %%eax\n"
		".Lpopulate_tlb:\n\t"
		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
		"addl $4096, %%eax\n\t"
		"cmpl %%eax, %[size]\n\t"
		"jne .Lpopulate_tlb\n\t"
		"xorl %%eax, %%eax\n\t"
		"cpuid\n\t"
		/* Now fill the cache */
		"xorl %%eax, %%eax\n"
		".Lfill_cache:\n"
		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
		"addl $64, %%eax\n\t"
		"cmpl %%eax, %[size]\n\t"
		"jne .Lfill_cache\n\t"
		"lfence\n"
		:: [flush_pages] "r" (vmx_l1d_flush_pages),
		   [size] "r" (size)
		: "eax", "ebx", "ecx", "edx");
}
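
To see the access pattern in isolation: the first loop reads one byte per 4 KiB page so the TLB covers the whole flush buffer, the cpuid acts as a serializing barrier, and the second loop reads one byte per 64-byte cache line across the full 64 KiB buffer to displace the L1D contents. A rough user-space sketch of just those two passes follows; the sizes are the assumed values from the comment above, the function name is invented, and none of the serialization of the asm version is reproduced:

#include <stdint.h>
#include <stdlib.h>

#define FLUSH_SIZE (16 * 4096)	/* PAGE_SIZE << L1D_CACHE_ORDER with order 4 */

static void fill_pattern(const uint8_t *buf)
{
	volatile uint8_t sink = 0;
	size_t off;

	/* Pass 1: one read per page, so the TLB covers the whole buffer. */
	for (off = 0; off < FLUSH_SIZE; off += 4096)
		sink ^= buf[off];

	/* Pass 2: one read per cache line, enough to push out a 32 KiB L1D. */
	for (off = 0; off < FLUSH_SIZE; off += 64)
		sink ^= buf[off];

	(void)sink;
}

int main(void)
{
	uint8_t *buf = calloc(1, FLUSH_SIZE);

	if (!buf)
		return 1;
	fill_pattern(buf);
	free(buf);
	return 0;
}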
|
||||
|
||||
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
|
||||
{
|
||||
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
||||
|
@ -8856,7 +9104,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
|
|||
clear_atomic_switch_msr(vmx, msrs[i].msr);
|
||||
else
|
||||
add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
|
||||
msrs[i].host);
|
||||
msrs[i].host, false);
|
||||
}
|
||||
|
||||
void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
|
||||
|
@ -8940,6 +9188,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
|
|||
|
||||
vmx->__launched = vmx->loaded_vmcs->launched;
|
||||
|
||||
if (static_branch_unlikely(&vmx_l1d_should_flush))
|
||||
vmx_l1d_flush(vcpu);
|
||||
|
||||
asm(
|
||||
/* Store host registers */
|
||||
"push %%" _ASM_DX "; push %%" _ASM_BP ";"
|
||||
|
@ -9297,6 +9548,37 @@ free_vcpu:
|
|||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
#define L1TF_MSG_SMT "L1TF CPU bug present and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
|
||||
#define L1TF_MSG_L1D "L1TF CPU bug present and virtualization mitigation disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html for details.\n"
|
||||
|
||||
static int vmx_vm_init(struct kvm *kvm)
|
||||
{
|
||||
if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
|
||||
switch (l1tf_mitigation) {
|
||||
case L1TF_MITIGATION_OFF:
|
||||
case L1TF_MITIGATION_FLUSH_NOWARN:
|
||||
/* 'I explicitly don't care' is set */
|
||||
break;
|
||||
case L1TF_MITIGATION_FLUSH:
|
||||
case L1TF_MITIGATION_FLUSH_NOSMT:
|
||||
case L1TF_MITIGATION_FULL:
|
||||
/*
|
||||
* Warn upon starting the first VM in a potentially
|
||||
* insecure environment.
|
||||
*/
|
||||
if (cpu_smt_control == CPU_SMT_ENABLED)
|
||||
pr_warn_once(L1TF_MSG_SMT);
|
||||
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
|
||||
pr_warn_once(L1TF_MSG_L1D);
|
||||
break;
|
||||
case L1TF_MITIGATION_FULL_FORCE:
|
||||
/* Flush is enforced */
|
||||
break;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __init vmx_check_processor_compat(void *rtn)
|
||||
{
|
||||
struct vmcs_config vmcs_conf;
|
||||
|
@ -10091,6 +10373,15 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
|
|||
*/
|
||||
vmx_set_constant_host_state(vmx);
|
||||
|
||||
/*
|
||||
* Set the MSR load/store lists to match L0's settings.
|
||||
*/
|
||||
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
|
||||
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
|
||||
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
|
||||
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
|
||||
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
|
||||
|
||||
/*
|
||||
* HOST_RSP is normally set correctly in vmx_vcpu_run() just before
|
||||
* entry, but only if the current (host) sp changed from the value
|
||||
|
@ -10441,6 +10732,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
|
|||
|
||||
vmcs12->launch_state = 1;
|
||||
|
||||
/* Hide L1D cache contents from the nested guest. */
|
||||
vmx->vcpu.arch.l1tf_flush_l1d = true;
|
||||
|
||||
if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
|
||||
return kvm_vcpu_halt(vcpu);
|
||||
|
||||
|
@ -10935,6 +11229,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
|
|||
load_vmcs12_host_state(vcpu, vmcs12);
|
||||
|
||||
/* Update any VMCS fields that might have changed while L2 ran */
|
||||
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
|
||||
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
|
||||
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
|
||||
if (vmx->hv_deadline_tsc == -1)
|
||||
vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
|
||||
|
@ -11366,6 +11662,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
|
|||
.cpu_has_accelerated_tpr = report_flexpriority,
|
||||
.has_emulated_msr = vmx_has_emulated_msr,
|
||||
|
||||
.vm_init = vmx_vm_init,
|
||||
|
||||
.vcpu_create = vmx_create_vcpu,
|
||||
.vcpu_free = vmx_free_vcpu,
|
||||
.vcpu_reset = vmx_vcpu_reset,
|
||||
|
@ -11375,6 +11673,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
|
|||
.vcpu_put = vmx_vcpu_put,
|
||||
|
||||
.update_bp_intercept = update_exception_bitmap,
|
||||
.get_msr_feature = vmx_get_msr_feature,
|
||||
.get_msr = vmx_get_msr,
|
||||
.set_msr = vmx_set_msr,
|
||||
.get_segment_base = vmx_get_segment_base,
|
||||
|
@ -11485,13 +11784,54 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
|
|||
.setup_mce = vmx_setup_mce,
|
||||
};
|
||||
|
||||
static void vmx_cleanup_l1d_flush(void)
|
||||
{
|
||||
if (vmx_l1d_flush_pages) {
|
||||
free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
|
||||
vmx_l1d_flush_pages = NULL;
|
||||
}
|
||||
/* Restore state so sysfs ignores VMX */
|
||||
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
|
||||
}
|
||||
|
||||
|
||||
static void vmx_exit(void)
|
||||
{
|
||||
#ifdef CONFIG_KEXEC_CORE
|
||||
RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
|
||||
synchronize_rcu();
|
||||
#endif
|
||||
|
||||
kvm_exit();
|
||||
|
||||
vmx_cleanup_l1d_flush();
|
||||
}
|
||||
module_exit(vmx_exit)
|
||||
|
||||
static int __init vmx_init(void)
|
||||
{
|
||||
int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
|
||||
__alignof__(struct vcpu_vmx), THIS_MODULE);
|
||||
int r;
|
||||
|
||||
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
|
||||
__alignof__(struct vcpu_vmx), THIS_MODULE);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
	/*
	 * Must be called after kvm_init() so enable_ept is properly set up.
	 * Hand in the mitigation mode value which was stored by the
	 * pre-module-init parameter parser. If no parameter was given, it
	 * will contain 'auto', which is then turned into the default 'cond'
	 * mitigation mode.
	 */
|
||||
if (boot_cpu_has(X86_BUG_L1TF)) {
|
||||
r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
|
||||
if (r) {
|
||||
vmx_exit();
|
||||
return r;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_KEXEC_CORE
|
||||
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
|
||||
crash_vmclear_local_loaded_vmcss);
|
||||
|
@ -11499,16 +11839,4 @@ static int __init vmx_init(void)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit vmx_exit(void)
|
||||
{
|
||||
#ifdef CONFIG_KEXEC_CORE
|
||||
RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
|
||||
synchronize_rcu();
|
||||
#endif
|
||||
|
||||
kvm_exit();
|
||||
}
|
||||
|
||||
module_init(vmx_init)
|
||||
module_exit(vmx_exit)
|
||||
|
|
|
@ -180,6 +180,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
|
|||
{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
|
||||
{ "irq_injections", VCPU_STAT(irq_injections) },
|
||||
{ "nmi_injections", VCPU_STAT(nmi_injections) },
|
||||
{ "l1d_flush", VCPU_STAT(l1d_flush) },
|
||||
{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
|
||||
{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
|
||||
{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
|
||||
|
@ -1007,6 +1008,71 @@ static u32 emulated_msrs[] = {
|
|||
|
||||
static unsigned num_emulated_msrs;
|
||||
|
||||
/*
|
||||
* List of msr numbers which are used to expose MSR-based features that
|
||||
* can be used by a hypervisor to validate requested CPU features.
|
||||
*/
|
||||
static u32 msr_based_features[] = {
|
||||
MSR_F10H_DECFG,
|
||||
MSR_IA32_UCODE_REV,
|
||||
MSR_IA32_ARCH_CAPABILITIES,
|
||||
};
|
||||
|
||||
static unsigned int num_msr_based_features;
|
||||
|
||||
u64 kvm_get_arch_capabilities(void)
|
||||
{
|
||||
u64 data;
|
||||
|
||||
rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);
|
||||
|
||||
	/*
	 * If we're doing cache flushes (either "always" or "cond")
	 * we will do one whenever the guest does a vmlaunch/vmresume.
	 * If an outer hypervisor is doing the cache flush for us
	 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
	 * capability to the guest too, and if EPT is disabled we're not
	 * vulnerable. Overall, only VMENTER_L1D_FLUSH_NEVER will
	 * require a nested hypervisor to do a flush of its own.
	 */
|
||||
if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
|
||||
data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
|
||||
|
||||
return data;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
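
The effect of the hunk above: the guest is told it may skip its own VMENTER L1D flush unless the host mitigation mode is 'never', because in every other mode L0 already flushes (or no flush is needed). A minimal sketch of that decision, with a simplified enum and an assumed bit value, looks like this:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-ins for the kernel's enum and MSR bit. */
enum l1d_flush_mode { FLUSH_AUTO, FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS };
#define CAP_SKIP_VMENTRY_L1DFLUSH (1ULL << 3)	/* assumed bit position */

static uint64_t guest_arch_capabilities(uint64_t host_caps, enum l1d_flush_mode mode)
{
	uint64_t data = host_caps;

	/*
	 * Only the 'never' mode leaves the nested hypervisor on its own;
	 * every other mode means L0 flushes for it (or nothing is needed),
	 * so the guest may skip its VMENTER flush.
	 */
	if (mode != FLUSH_NEVER)
		data |= CAP_SKIP_VMENTRY_L1DFLUSH;
	return data;
}

int main(void)
{
	printf("never: %#llx\n", (unsigned long long)guest_arch_capabilities(0, FLUSH_NEVER));
	printf("cond:  %#llx\n", (unsigned long long)guest_arch_capabilities(0, FLUSH_COND));
	return 0;
}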
|
||||
|
||||
static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
|
||||
{
|
||||
switch (msr->index) {
|
||||
case MSR_IA32_ARCH_CAPABILITIES:
|
||||
msr->data = kvm_get_arch_capabilities();
|
||||
break;
|
||||
case MSR_IA32_UCODE_REV:
|
||||
rdmsrl_safe(msr->index, &msr->data);
|
||||
break;
|
||||
default:
|
||||
if (kvm_x86_ops->get_msr_feature(msr))
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
|
||||
{
|
||||
struct kvm_msr_entry msr;
|
||||
int r;
|
||||
|
||||
msr.index = index;
|
||||
r = kvm_get_msr_feature(&msr);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
*data = msr.data;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
|
||||
{
|
||||
if (efer & efer_reserved_bits)
|
||||
|
@ -2121,13 +2187,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
|
||||
switch (msr) {
|
||||
case MSR_AMD64_NB_CFG:
|
||||
case MSR_IA32_UCODE_REV:
|
||||
case MSR_IA32_UCODE_WRITE:
|
||||
case MSR_VM_HSAVE_PA:
|
||||
case MSR_AMD64_PATCH_LOADER:
|
||||
case MSR_AMD64_BU_CFG2:
|
||||
break;
|
||||
|
||||
case MSR_IA32_UCODE_REV:
|
||||
if (msr_info->host_initiated)
|
||||
vcpu->arch.microcode_version = data;
|
||||
break;
|
||||
case MSR_EFER:
|
||||
return set_efer(vcpu, data);
|
||||
case MSR_K7_HWCR:
|
||||
|
@ -2402,7 +2471,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
|
|||
msr_info->data = 0;
|
||||
break;
|
||||
case MSR_IA32_UCODE_REV:
|
||||
msr_info->data = 0x100000000ULL;
|
||||
msr_info->data = vcpu->arch.microcode_version;
|
||||
break;
|
||||
case MSR_MTRRcap:
|
||||
case 0x200 ... 0x2ff:
|
||||
|
@ -2545,13 +2614,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
|
|||
int (*do_msr)(struct kvm_vcpu *vcpu,
|
||||
unsigned index, u64 *data))
|
||||
{
|
||||
int i, idx;
|
||||
int i;
|
||||
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
for (i = 0; i < msrs->nmsrs; ++i)
|
||||
if (do_msr(vcpu, entries[i].index, &entries[i].data))
|
||||
break;
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
|
||||
return i;
|
||||
}
|
||||
|
@ -2651,6 +2718,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
case KVM_CAP_ASSIGN_DEV_IRQ:
|
||||
case KVM_CAP_PCI_2_3:
|
||||
#endif
|
||||
case KVM_CAP_GET_MSR_FEATURES:
|
||||
r = 1;
|
||||
break;
|
||||
case KVM_CAP_ADJUST_CLOCK:
|
||||
|
@ -2770,6 +2838,31 @@ long kvm_arch_dev_ioctl(struct file *filp,
|
|||
goto out;
|
||||
r = 0;
|
||||
break;
|
||||
case KVM_GET_MSR_FEATURE_INDEX_LIST: {
|
||||
struct kvm_msr_list __user *user_msr_list = argp;
|
||||
struct kvm_msr_list msr_list;
|
||||
unsigned int n;
|
||||
|
||||
r = -EFAULT;
|
||||
if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
|
||||
goto out;
|
||||
n = msr_list.nmsrs;
|
||||
msr_list.nmsrs = num_msr_based_features;
|
||||
if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
|
||||
goto out;
|
||||
r = -E2BIG;
|
||||
if (n < msr_list.nmsrs)
|
||||
goto out;
|
||||
r = -EFAULT;
|
||||
if (copy_to_user(user_msr_list->indices, &msr_based_features,
|
||||
num_msr_based_features * sizeof(u32)))
|
||||
goto out;
|
||||
r = 0;
|
||||
break;
|
||||
}
|
||||
case KVM_GET_MSRS:
|
||||
r = msr_io(NULL, argp, do_get_msr_feature, 1);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
r = -EINVAL;
|
||||
|
@ -3451,12 +3544,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
|
|||
r = 0;
|
||||
break;
|
||||
}
|
||||
case KVM_GET_MSRS:
|
||||
case KVM_GET_MSRS: {
|
||||
int idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
r = msr_io(vcpu, argp, do_get_msr, 1);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
break;
|
||||
case KVM_SET_MSRS:
|
||||
}
|
||||
case KVM_SET_MSRS: {
|
||||
int idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
r = msr_io(vcpu, argp, do_set_msr, 0);
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
break;
|
||||
}
|
||||
case KVM_TPR_ACCESS_REPORTING: {
|
||||
struct kvm_tpr_access_ctl tac;
|
||||
|
||||
|
@ -4236,6 +4335,19 @@ static void kvm_init_msr_list(void)
|
|||
j++;
|
||||
}
|
||||
num_emulated_msrs = j;
|
||||
|
||||
for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
|
||||
struct kvm_msr_entry msr;
|
||||
|
||||
msr.index = msr_based_features[i];
|
||||
if (kvm_get_msr_feature(&msr))
|
||||
continue;
|
||||
|
||||
if (j < i)
|
||||
msr_based_features[j] = msr_based_features[i];
|
||||
j++;
|
||||
}
|
||||
num_msr_based_features = j;
|
||||
}
|
||||
|
||||
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
|
||||
|
@ -4476,6 +4588,9 @@ static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *v
|
|||
int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
|
||||
unsigned int bytes, struct x86_exception *exception)
|
||||
{
|
||||
/* kvm_write_guest_virt_system can pull in tons of pages. */
|
||||
vcpu->arch.l1tf_flush_l1d = true;
|
||||
|
||||
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
|
||||
PFERR_WRITE_MASK, exception);
|
||||
}
|
||||
|
@ -5574,6 +5689,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
|
|||
bool writeback = true;
|
||||
bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
|
||||
|
||||
vcpu->arch.l1tf_flush_l1d = true;
|
||||
|
||||
/*
|
||||
* Clear write_fault_to_shadow_pgtable here to ensure it is
|
||||
* never reused.
|
||||
|
@ -6929,6 +7046,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
|
|||
struct kvm *kvm = vcpu->kvm;
|
||||
|
||||
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
vcpu->arch.l1tf_flush_l1d = true;
|
||||
|
||||
for (;;) {
|
||||
if (kvm_vcpu_running(vcpu)) {
|
||||
|
@ -7899,6 +8017,7 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
|
|||
|
||||
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
|
||||
{
|
||||
vcpu->arch.l1tf_flush_l1d = true;
|
||||
kvm_x86_ops->sched_in(vcpu, cpu);
|
||||
}
|
||||
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <asm/vsyscall.h> /* emulate_vsyscall */
|
||||
#include <asm/vm86.h> /* struct vm86 */
|
||||
#include <asm/mmu_context.h> /* vma_pkey() */
|
||||
#include <asm/sections.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <asm/trace/exceptions.h>
|
||||
|
|
|
@ -4,6 +4,8 @@
|
|||
#include <linux/swap.h>
|
||||
#include <linux/memblock.h>
|
||||
#include <linux/bootmem.h> /* for max_low_pfn */
|
||||
#include <linux/swapfile.h>
|
||||
#include <linux/swapops.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/e820.h>
|
||||
|
@ -780,3 +782,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
|
|||
__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
|
||||
__pte2cachemode_tbl[entry] = cache;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SWAP
unsigned long max_swapfile_size(void)
{
	unsigned long pages;

	pages = generic_max_swapfile_size();

	if (boot_cpu_has_bug(X86_BUG_L1TF)) {
		/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
		unsigned long l1tf_limit = l1tf_pfn_limit() + 1;
		/*
		 * We encode swap offsets also with 3 bits below those for pfn
		 * which makes the usable limit higher.
		 */
#if CONFIG_PGTABLE_LEVELS > 2
		l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
#endif
		pages = min_t(unsigned long, l1tf_limit, pages);
	}
	return pages;
}
#endif
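
A worked example of the clamp, taking the comment's three extra offset bits at face value and assuming a CPU with 46 physical address bits, 4 KiB pages, and l1tf_pfn_limit() + 1 equal to 2^(46 - 1 - 12). Only the arithmetic is reproduced, not the kernel helpers:

#include <stdio.h>

int main(void)
{
	unsigned long long max_pa_bits = 46;	/* assumed CPU */
	unsigned long long page_shift = 12;
	unsigned long long extra_offset_bits = 3; /* PAGE_SHIFT - SWP_OFFSET_FIRST_BIT, per the comment */

	unsigned long long l1tf_limit = 1ULL << (max_pa_bits - 1 - page_shift);
	unsigned long long swap_pages = l1tf_limit << extra_offset_bits;

	/* MAX_PA/2 expressed in pages, then widened by the extra offset bits. */
	printf("pfn limit:     %llu pages (%llu GiB)\n",
	       l1tf_limit, (l1tf_limit << page_shift) >> 30);
	printf("swap size cap: %llu pages (%llu GiB)\n",
	       swap_pages, (swap_pages << page_shift) >> 30);
	return 0;
}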
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
#include <asm/desc.h>
|
||||
#include <asm/cmdline.h>
|
||||
#include <asm/vsyscall.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
int kaiser_enabled __read_mostly = 1;
|
||||
EXPORT_SYMBOL(kaiser_enabled); /* for inlined TLB flush functions */
|
||||
|
|
|
@ -125,24 +125,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
|
|||
|
||||
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
|
||||
{
|
||||
pmd_t new_pmd;
|
||||
pmdval_t v = pmd_val(*pmd);
|
||||
if (clear) {
|
||||
*old = v & _PAGE_PRESENT;
|
||||
v &= ~_PAGE_PRESENT;
|
||||
} else /* presume this has been called with clear==true previously */
|
||||
v |= *old;
|
||||
set_pmd(pmd, __pmd(v));
|
||||
*old = v;
|
||||
new_pmd = pmd_mknotpresent(*pmd);
|
||||
} else {
|
||||
/* Presume this has been called with clear==true previously */
|
||||
new_pmd = __pmd(*old);
|
||||
}
|
||||
set_pmd(pmd, new_pmd);
|
||||
}
|
||||
|
||||
static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
|
||||
{
|
||||
pteval_t v = pte_val(*pte);
|
||||
if (clear) {
|
||||
*old = v & _PAGE_PRESENT;
|
||||
v &= ~_PAGE_PRESENT;
|
||||
} else /* presume this has been called with clear==true previously */
|
||||
v |= *old;
|
||||
set_pte_atomic(pte, __pte(v));
|
||||
*old = v;
|
||||
/* Nothing should care about address */
|
||||
pte_clear(&init_mm, 0, pte);
|
||||
} else {
|
||||
/* Presume this has been called with clear==true previously */
|
||||
set_pte_atomic(pte, __pte(*old));
|
||||
}
|
||||
}
|
||||
|
||||
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
|
||||
|
|
|
@ -121,3 +121,24 @@ const char *arch_vma_name(struct vm_area_struct *vma)
|
|||
return "[mpx]";
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * Only allow root to set high MMIO mappings to PROT_NONE.
 * This prevents an unprivileged user from setting them to PROT_NONE and
 * inverting them, then pointing them at valid memory for L1TF speculation.
 *
 * Note: locked-down kernels may want to disable the root override.
 */
bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return true;
	if (!__pte_needs_invert(pgprot_val(prot)))
		return true;
	/* If it's real memory always allow */
	if (pfn_valid(pfn))
		return true;
	if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
		return false;
	return true;
}
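
Restated as a pure decision chain (predicate names are stand-ins for the kernel helpers and the pfn limit is an assumed constant), the checks above reduce to:

#include <stdbool.h>

#define ASSUMED_L1TF_PFN_LIMIT 0x1ffffffUL	/* illustrative only */

bool pfn_modify_allowed_model(unsigned long pfn, bool cpu_has_l1tf,
			      bool prot_needs_invert, bool pfn_is_ram,
			      bool is_admin)
{
	if (!cpu_has_l1tf)
		return true;		/* nothing to protect against */
	if (!prot_needs_invert)
		return true;		/* not a PROT_NONE-style inverted PTE */
	if (pfn_is_ram)
		return true;		/* real memory is always fine */
	/* High MMIO pfn with an inverted protection: root only. */
	return pfn <= ASSUMED_L1TF_PFN_LIMIT || is_admin;
}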
|
||||
|
|
|
@ -1001,8 +1001,8 @@ static long populate_pmd(struct cpa_data *cpa,
|
|||
|
||||
pmd = pmd_offset(pud, start);
|
||||
|
||||
set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
|
||||
massage_pgprot(pmd_pgprot)));
|
||||
set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn,
|
||||
canon_pgprot(pmd_pgprot))));
|
||||
|
||||
start += PMD_SIZE;
|
||||
cpa->pfn += PMD_SIZE >> PAGE_SHIFT;
|
||||
|
@ -1074,8 +1074,8 @@ static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
|
|||
* Map everything starting from the Gb boundary, possibly with 1G pages
|
||||
*/
|
||||
while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
|
||||
set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
|
||||
massage_pgprot(pud_pgprot)));
|
||||
set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn,
|
||||
canon_pgprot(pud_pgprot))));
|
||||
|
||||
start += PUD_SIZE;
|
||||
cpa->pfn += PUD_SIZE >> PAGE_SHIFT;
|
||||
|
|
|
@ -45,6 +45,7 @@
|
|||
#include <asm/realmode.h>
|
||||
#include <asm/time.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
/*
|
||||
* We allocate runtime services regions bottom-up, starting from -4G, i.e.
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/dmi.h>
|
||||
#include <asm/efi.h>
|
||||
#include <asm/uv/uv.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
#define EFI_MIN_RESERVE 5120
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <asm/intel-mid.h>
|
||||
#include <asm/intel_scu_ipc.h>
|
||||
#include <asm/io_apic.h>
|
||||
#include <asm/hw_irq.h>
|
||||
|
||||
#define TANGIER_EXT_TIMER0_MSI 12
|
||||
|
||||
|
|
|
@ -1283,6 +1283,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
|
|||
struct msg_desc msgdesc;
|
||||
|
||||
ack_APIC_irq();
|
||||
kvm_set_cpu_l1tf_flush_l1d();
|
||||
time_start = get_cycles();
|
||||
|
||||
bcp = &per_cpu(bau_control, smp_processor_id());
|
||||
|
|
|
@ -35,6 +35,7 @@
|
|||
#include <linux/frame.h>
|
||||
|
||||
#include <linux/kexec.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <xen/xen.h>
|
||||
#include <xen/events.h>
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#include <asm/setup.h>
|
||||
#include <asm/acpi.h>
|
||||
#include <asm/numa.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/xen/hypervisor.h>
|
||||
#include <asm/xen/hypercall.h>
|
||||
|
||||
|
|
|
@ -187,10 +187,12 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {
|
|||
|
||||
static const struct lpss_device_desc byt_pwm_dev_desc = {
|
||||
.flags = LPSS_SAVE_CTX,
|
||||
.prv_offset = 0x800,
|
||||
};
|
||||
|
||||
static const struct lpss_device_desc bsw_pwm_dev_desc = {
|
||||
.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
|
||||
.prv_offset = 0x800,
|
||||
};
|
||||
|
||||
static const struct lpss_device_desc byt_uart_dev_desc = {
|
||||
|
|
|
@ -525,16 +525,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
|
|||
return sprintf(buf, "Not affected\n");
|
||||
}
|
||||
|
||||
ssize_t __weak cpu_show_l1tf(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
return sprintf(buf, "Not affected\n");
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
|
||||
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
|
||||
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
|
||||
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
|
||||
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
|
||||
|
||||
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
|
||||
&dev_attr_meltdown.attr,
|
||||
&dev_attr_spectre_v1.attr,
|
||||
&dev_attr_spectre_v2.attr,
|
||||
&dev_attr_spec_store_bypass.attr,
|
||||
&dev_attr_l1tf.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ struct file_priv {
|
|||
struct tpm_chip *chip;
|
||||
|
||||
/* Data passed to and from the tpm via the read/write calls */
|
||||
atomic_t data_pending;
|
||||
size_t data_pending;
|
||||
struct mutex buffer_mutex;
|
||||
|
||||
struct timer_list user_read_timer; /* user needs to claim result */
|
||||
|
@ -46,7 +46,7 @@ static void timeout_work(struct work_struct *work)
|
|||
struct file_priv *priv = container_of(work, struct file_priv, work);
|
||||
|
||||
mutex_lock(&priv->buffer_mutex);
|
||||
atomic_set(&priv->data_pending, 0);
|
||||
priv->data_pending = 0;
|
||||
memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
}
|
||||
|
@ -72,7 +72,6 @@ static int tpm_open(struct inode *inode, struct file *file)
|
|||
}
|
||||
|
||||
priv->chip = chip;
|
||||
atomic_set(&priv->data_pending, 0);
|
||||
mutex_init(&priv->buffer_mutex);
|
||||
setup_timer(&priv->user_read_timer, user_reader_timeout,
|
||||
(unsigned long)priv);
|
||||
|
@ -86,28 +85,24 @@ static ssize_t tpm_read(struct file *file, char __user *buf,
|
|||
size_t size, loff_t *off)
|
||||
{
|
||||
struct file_priv *priv = file->private_data;
|
||||
ssize_t ret_size;
|
||||
ssize_t ret_size = 0;
|
||||
int rc;
|
||||
|
||||
del_singleshot_timer_sync(&priv->user_read_timer);
|
||||
flush_work(&priv->work);
|
||||
ret_size = atomic_read(&priv->data_pending);
|
||||
if (ret_size > 0) { /* relay data */
|
||||
ssize_t orig_ret_size = ret_size;
|
||||
if (size < ret_size)
|
||||
ret_size = size;
|
||||
mutex_lock(&priv->buffer_mutex);
|
||||
|
||||
mutex_lock(&priv->buffer_mutex);
|
||||
if (priv->data_pending) {
|
||||
ret_size = min_t(ssize_t, size, priv->data_pending);
|
||||
rc = copy_to_user(buf, priv->data_buffer, ret_size);
|
||||
memset(priv->data_buffer, 0, orig_ret_size);
|
||||
memset(priv->data_buffer, 0, priv->data_pending);
|
||||
if (rc)
|
||||
ret_size = -EFAULT;
|
||||
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
priv->data_pending = 0;
|
||||
}
|
||||
|
||||
atomic_set(&priv->data_pending, 0);
|
||||
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
return ret_size;
|
||||
}
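
The fix above replaces an atomic_t that was tested outside the lock with a plain size_t that is only read and written under buffer_mutex, which closes the read/write race. Below is a condensed user-space model of the resulting locking pattern, with a pthread mutex standing in for buffer_mutex; the names are invented and the timer and initialisation paths are omitted:

#include <pthread.h>
#include <string.h>
#include <sys/types.h>

#define BUFSIZE 4096

struct file_priv_model {
	pthread_mutex_t buffer_mutex;
	size_t data_pending;		/* only valid while holding the mutex */
	char data_buffer[BUFSIZE];
};

/* Writer: refuse new commands while a previous result is still unread. */
ssize_t model_write(struct file_priv_model *p, const char *src, size_t n)
{
	if (n > BUFSIZE)
		return -1;
	pthread_mutex_lock(&p->buffer_mutex);
	if (p->data_pending != 0) {
		pthread_mutex_unlock(&p->buffer_mutex);
		return -1;		/* -EBUSY in the driver */
	}
	memcpy(p->data_buffer, src, n);
	p->data_pending = n;		/* set under the same lock it is read under */
	pthread_mutex_unlock(&p->buffer_mutex);
	return (ssize_t)n;
}

/* Reader: consume and clear the pending data in one critical section. */
ssize_t model_read(struct file_priv_model *p, char *dst, size_t n)
{
	ssize_t ret = 0;

	pthread_mutex_lock(&p->buffer_mutex);
	if (p->data_pending) {
		ret = (ssize_t)(n < p->data_pending ? n : p->data_pending);
		memcpy(dst, p->data_buffer, (size_t)ret);
		memset(p->data_buffer, 0, p->data_pending);
		p->data_pending = 0;
	}
	pthread_mutex_unlock(&p->buffer_mutex);
	return ret;
}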
|
||||
|
||||
|
@ -118,18 +113,20 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
|
|||
size_t in_size = size;
|
||||
ssize_t out_size;
|
||||
|
||||
/* cannot perform a write until the read has cleared
|
||||
either via tpm_read or a user_read_timer timeout.
|
||||
This also prevents splitted buffered writes from blocking here.
|
||||
*/
|
||||
if (atomic_read(&priv->data_pending) != 0)
|
||||
return -EBUSY;
|
||||
|
||||
if (in_size > TPM_BUFSIZE)
|
||||
return -E2BIG;
|
||||
|
||||
mutex_lock(&priv->buffer_mutex);
|
||||
|
||||
/* Cannot perform a write until the read has cleared either via
|
||||
* tpm_read or a user_read_timer timeout. This also prevents split
|
||||
* buffered writes from blocking here.
|
||||
*/
|
||||
if (priv->data_pending != 0) {
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (copy_from_user
|
||||
(priv->data_buffer, (void __user *) buf, in_size)) {
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
|
@ -159,7 +156,7 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
|
|||
return out_size;
|
||||
}
|
||||
|
||||
atomic_set(&priv->data_pending, out_size);
|
||||
priv->data_pending = out_size;
|
||||
mutex_unlock(&priv->buffer_mutex);
|
||||
|
||||
/* Set a timeout by which the reader must come claim the result */
|
||||
|
@ -178,7 +175,7 @@ static int tpm_release(struct inode *inode, struct file *file)
|
|||
del_singleshot_timer_sync(&priv->user_read_timer);
|
||||
flush_work(&priv->work);
|
||||
file->private_data = NULL;
|
||||
atomic_set(&priv->data_pending, 0);
|
||||
priv->data_pending = 0;
|
||||
clear_bit(0, &priv->chip->is_open);
|
||||
kfree(priv);
|
||||
return 0;
|
||||
|
|
|
@ -122,16 +122,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
|
|||
umem->address = addr;
|
||||
umem->page_size = PAGE_SIZE;
|
||||
umem->pid = get_task_pid(current, PIDTYPE_PID);
|
||||
/*
|
||||
* We ask for writable memory if any of the following
|
||||
* access flags are set. "Local write" and "remote write"
|
||||
* obviously require write access. "Remote atomic" can do
|
||||
* things like fetch and add, which will modify memory, and
|
||||
* "MW bind" can change permissions by binding a window.
|
||||
*/
|
||||
umem->writable = !!(access &
|
||||
(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
|
||||
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
|
||||
umem->writable = ib_access_writable(access);
|
||||
|
||||
if (access & IB_ACCESS_ON_DEMAND) {
|
||||
put_pid(umem->pid);
|
||||
|
|
|
@ -131,6 +131,40 @@ out:
|
|||
return err;
|
||||
}
|
||||
|
||||
static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
|
||||
u64 length, u64 virt_addr,
|
||||
int access_flags)
|
||||
{
|
||||
	/*
	 * Force registering the memory as writable if the underlying pages
	 * are writable. This is so rereg can change the access permissions
	 * from readable to writable without having to run through
	 * ib_umem_get again.
	 */
|
||||
if (!ib_access_writable(access_flags)) {
|
||||
struct vm_area_struct *vma;
|
||||
|
||||
down_read(¤t->mm->mmap_sem);
|
||||
/*
|
||||
* FIXME: Ideally this would iterate over all the vmas that
|
||||
* cover the memory, but for now it requires a single vma to
|
||||
* entirely cover the MR to support RO mappings.
|
||||
*/
|
||||
vma = find_vma(current->mm, start);
|
||||
if (vma && vma->vm_end >= start + length &&
|
||||
vma->vm_start <= start) {
|
||||
if (vma->vm_flags & VM_WRITE)
|
||||
access_flags |= IB_ACCESS_LOCAL_WRITE;
|
||||
} else {
|
||||
access_flags |= IB_ACCESS_LOCAL_WRITE;
|
||||
}
|
||||
|
||||
up_read(¤t->mm->mmap_sem);
|
||||
}
|
||||
|
||||
return ib_umem_get(context, start, length, access_flags, 0);
|
||||
}
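
The core of mlx4_get_umem_mr() is the test "does a single VMA cover the whole range, and is it writable". Stripped of the mmap_sem locking and the IB access flags, that is plain interval arithmetic; a hedged stand-alone sketch with a simplified struct and invented names:

#include <stdbool.h>

struct vma_model {
	unsigned long vm_start;	/* inclusive */
	unsigned long vm_end;	/* exclusive */
	bool writable;		/* VM_WRITE in the real vm_flags */
};

/*
 * Mirror of the condition used above: the single VMA found at 'start'
 * must begin at or before the range and end at or after it.
 */
bool vma_covers_range(const struct vma_model *vma,
		      unsigned long start, unsigned long length)
{
	return vma &&
	       vma->vm_start <= start &&
	       vma->vm_end >= start + length;
}

/* Force local-write registration only when the covering VMA is writable. */
bool should_force_writable(const struct vma_model *vma,
			   unsigned long start, unsigned long length)
{
	if (!vma_covers_range(vma, start, length))
		return true;	/* the kernel code defaults to writable here */
	return vma->writable;
}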
|
||||
|
||||
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
u64 virt_addr, int access_flags,
|
||||
struct ib_udata *udata)
|
||||
|
@ -145,10 +179,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
|||
if (!mr)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* Force registering the memory as writable. */
|
||||
/* Used for memory re-registration. HCA protects the access */
|
||||
mr->umem = ib_umem_get(pd->uobject->context, start, length,
|
||||
access_flags | IB_ACCESS_LOCAL_WRITE, 0);
|
||||
mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
|
||||
virt_addr, access_flags);
|
||||
if (IS_ERR(mr->umem)) {
|
||||
err = PTR_ERR(mr->umem);
|
||||
goto err_free;
|
||||
|
@ -215,6 +247,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
|
|||
}
|
||||
|
||||
if (flags & IB_MR_REREG_ACCESS) {
|
||||
if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
|
||||
return -EPERM;
|
||||
|
||||
err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
|
||||
convert_access(mr_access_flags));
|
||||
|
||||
|
@ -228,10 +263,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
|
|||
|
||||
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
|
||||
ib_umem_release(mmr->umem);
|
||||
mmr->umem = ib_umem_get(mr->uobject->context, start, length,
|
||||
mr_access_flags |
|
||||
IB_ACCESS_LOCAL_WRITE,
|
||||
0);
|
||||
mmr->umem =
|
||||
mlx4_get_umem_mr(mr->uobject->context, start, length,
|
||||
virt_addr, mr_access_flags);
|
||||
if (IS_ERR(mmr->umem)) {
|
||||
err = PTR_ERR(mmr->umem);
|
||||
/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
|
||||
|
|
|
@ -645,7 +645,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
|
|||
struct ocrdma_stats *pstats = filp->private_data;
|
||||
struct ocrdma_dev *dev = pstats->dev;
|
||||
|
||||
if (count > 32)
|
||||
if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
|
||||
goto err;
|
||||
|
||||
if (copy_from_user(tmp_str, buffer, count))
|
||||
|
|
|
@ -2008,6 +2008,9 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
|
|||
|
||||
nand_set_flash_node(chip, dn);
|
||||
mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
|
||||
if (!mtd->name)
|
||||
return -ENOMEM;
|
||||
|
||||
mtd->owner = THIS_MODULE;
|
||||
mtd->dev.parent = dev;
|
||||
|
||||
|
|
|
@ -893,7 +893,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
|
|||
struct sk_buff *skb,
|
||||
struct sk_buff_head *list)
|
||||
{
|
||||
struct skb_shared_info *shinfo = skb_shinfo(skb);
|
||||
RING_IDX cons = queue->rx.rsp_cons;
|
||||
struct sk_buff *nskb;
|
||||
|
||||
|
@ -902,15 +901,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
|
|||
RING_GET_RESPONSE(&queue->rx, ++cons);
|
||||
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
|
||||
|
||||
if (shinfo->nr_frags == MAX_SKB_FRAGS) {
|
||||
if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
|
||||
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
|
||||
|
||||
BUG_ON(pull_to <= skb_headlen(skb));
|
||||
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
|
||||
}
|
||||
BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
|
||||
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
|
||||
|
||||
skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
|
||||
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
|
||||
skb_frag_page(nfrag),
|
||||
rx->offset, rx->status, PAGE_SIZE);
|
||||
|
||||
skb_shinfo(nskb)->nr_frags = 0;
|
||||
|
|
|
@ -52,6 +52,8 @@
|
|||
#include <linux/pci.h>
|
||||
#include <linux/semaphore.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/irq.h>
|
||||
|
||||
#include <asm/irqdomain.h>
|
||||
#include <asm/apic.h>
|
||||
#include <linux/msi.h>
|
||||
|
|
|
@ -520,18 +520,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
|
|||
static int sr_block_open(struct block_device *bdev, fmode_t mode)
|
||||
{
|
||||
struct scsi_cd *cd;
|
||||
struct scsi_device *sdev;
|
||||
int ret = -ENXIO;
|
||||
|
||||
cd = scsi_cd_get(bdev->bd_disk);
|
||||
if (!cd)
|
||||
goto out;
|
||||
|
||||
sdev = cd->device;
|
||||
scsi_autopm_get_device(sdev);
|
||||
check_disk_change(bdev);
|
||||
|
||||
mutex_lock(&sr_mutex);
|
||||
cd = scsi_cd_get(bdev->bd_disk);
|
||||
if (cd) {
|
||||
ret = cdrom_open(&cd->cdi, bdev, mode);
|
||||
if (ret)
|
||||
scsi_cd_put(cd);
|
||||
}
|
||||
ret = cdrom_open(&cd->cdi, bdev, mode);
|
||||
mutex_unlock(&sr_mutex);
|
||||
|
||||
scsi_autopm_put_device(sdev);
|
||||
if (ret)
|
||||
scsi_cd_put(cd);
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -559,6 +567,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
|
|||
if (ret)
|
||||
goto out;
|
||||
|
||||
scsi_autopm_get_device(sdev);
|
||||
|
||||
/*
|
||||
* Send SCSI addressing ioctls directly to mid level, send other
|
||||
* ioctls to cdrom/block level.
|
||||
|
@ -567,15 +577,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
|
|||
case SCSI_IOCTL_GET_IDLUN:
|
||||
case SCSI_IOCTL_GET_BUS_NUMBER:
|
||||
ret = scsi_ioctl(sdev, cmd, argp);
|
||||
goto out;
|
||||
goto put;
|
||||
}
|
||||
|
||||
ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
|
||||
if (ret != -ENOSYS)
|
||||
goto out;
|
||||
goto put;
|
||||
|
||||
ret = scsi_ioctl(sdev, cmd, argp);
|
||||
|
||||
put:
|
||||
scsi_autopm_put_device(sdev);
|
||||
|
||||
out:
|
||||
mutex_unlock(&sr_mutex);
|
||||
return ret;
|
||||
|
|
fs/dcache.c
|
@ -352,14 +352,11 @@ static void dentry_unlink_inode(struct dentry * dentry)
|
|||
__releases(dentry->d_inode->i_lock)
|
||||
{
|
||||
struct inode *inode = dentry->d_inode;
|
||||
bool hashed = !d_unhashed(dentry);
|
||||
|
||||
if (hashed)
|
||||
raw_write_seqcount_begin(&dentry->d_seq);
|
||||
raw_write_seqcount_begin(&dentry->d_seq);
|
||||
__d_clear_type_and_inode(dentry);
|
||||
hlist_del_init(&dentry->d_u.d_alias);
|
||||
if (hashed)
|
||||
raw_write_seqcount_end(&dentry->d_seq);
|
||||
raw_write_seqcount_end(&dentry->d_seq);
|
||||
spin_unlock(&dentry->d_lock);
|
||||
spin_unlock(&inode->i_lock);
|
||||
if (!inode->i_nlink)
|
||||
|
@ -1914,10 +1911,12 @@ struct dentry *d_make_root(struct inode *root_inode)
|
|||
|
||||
if (root_inode) {
|
||||
res = __d_alloc(root_inode->i_sb, NULL);
|
||||
if (res)
|
||||
if (res) {
|
||||
res->d_flags |= DCACHE_RCUACCESS;
|
||||
d_instantiate(res, root_inode);
|
||||
else
|
||||
} else {
|
||||
iput(root_inode);
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
|
|
@ -1320,7 +1320,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
|
|||
ext4_itable_unused_count(sb, gdp)),
|
||||
sbi->s_inodes_per_block);
|
||||
|
||||
if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
|
||||
if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
|
||||
((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
|
||||
ext4_itable_unused_count(sb, gdp)) <
|
||||
EXT4_FIRST_INO(sb)))) {
|
||||
ext4_error(sb, "Something is wrong with group %u: "
|
||||
"used itable blocks: %d; "
|
||||
"itable unused count: %u",
|
||||
|
|
|
@ -3035,14 +3035,8 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
|
|||
if (!gdp)
|
||||
continue;
|
||||
|
||||
if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
|
||||
continue;
|
||||
if (group != 0)
|
||||
if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)))
|
||||
break;
|
||||
ext4_error(sb, "Inode table for bg 0 marked as "
|
||||
"needing zeroing");
|
||||
if (sb->s_flags & MS_RDONLY)
|
||||
return ngroups;
|
||||
}
|
||||
|
||||
return group;
|
||||
|
|
|
@ -605,12 +605,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
|
|||
return 0;
|
||||
mnt = real_mount(bastard);
|
||||
mnt_add_count(mnt, 1);
|
||||
smp_mb(); // see mntput_no_expire()
|
||||
if (likely(!read_seqretry(&mount_lock, seq)))
|
||||
return 0;
|
||||
if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
|
||||
mnt_add_count(mnt, -1);
|
||||
return 1;
|
||||
}
|
||||
lock_mount_hash();
|
||||
if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
|
||||
mnt_add_count(mnt, -1);
|
||||
unlock_mount_hash();
|
||||
return 1;
|
||||
}
|
||||
unlock_mount_hash();
|
||||
/* caller will mntput() */
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -1157,12 +1166,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
|
|||
static void mntput_no_expire(struct mount *mnt)
|
||||
{
|
||||
rcu_read_lock();
|
||||
mnt_add_count(mnt, -1);
|
||||
if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
|
||||
if (likely(READ_ONCE(mnt->mnt_ns))) {
|
||||
/*
|
||||
* Since we don't do lock_mount_hash() here,
|
||||
* ->mnt_ns can change under us. However, if it's
|
||||
* non-NULL, then there's a reference that won't
|
||||
* be dropped until after an RCU delay done after
|
||||
* turning ->mnt_ns NULL. So if we observe it
|
||||
* non-NULL under rcu_read_lock(), the reference
|
||||
* we are dropping is not the final one.
|
||||
*/
|
||||
mnt_add_count(mnt, -1);
|
||||
rcu_read_unlock();
|
||||
return;
|
||||
}
|
||||
lock_mount_hash();
|
||||
/*
|
||||
* make sure that if __legitimize_mnt() has not seen us grab
|
||||
* mount_lock, we'll see their refcount increment here.
|
||||
*/
|
||||
smp_mb();
|
||||
mnt_add_count(mnt, -1);
|
||||
if (mnt_get_count(mnt)) {
|
||||
rcu_read_unlock();
|
||||
unlock_mount_hash();
|
||||
|
|
|
@ -43,10 +43,11 @@ static void proc_evict_inode(struct inode *inode)
|
|||
de = PDE(inode);
|
||||
if (de)
|
||||
pde_put(de);
|
||||
|
||||
head = PROC_I(inode)->sysctl;
|
||||
if (head) {
|
||||
RCU_INIT_POINTER(PROC_I(inode)->sysctl, NULL);
|
||||
sysctl_head_put(head);
|
||||
proc_sys_evict_inode(inode, head);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -65,6 +65,7 @@ struct proc_inode {
|
|||
struct proc_dir_entry *pde;
|
||||
struct ctl_table_header *sysctl;
|
||||
struct ctl_table *sysctl_entry;
|
||||
struct hlist_node sysctl_inodes;
|
||||
const struct proc_ns_operations *ns_ops;
|
||||
struct inode vfs_inode;
|
||||
};
|
||||
|
@ -249,10 +250,12 @@ extern void proc_thread_self_init(void);
|
|||
*/
|
||||
#ifdef CONFIG_PROC_SYSCTL
|
||||
extern int proc_sys_init(void);
|
||||
extern void sysctl_head_put(struct ctl_table_header *);
|
||||
extern void proc_sys_evict_inode(struct inode *inode,
|
||||
struct ctl_table_header *head);
|
||||
#else
|
||||
static inline void proc_sys_init(void) { }
|
||||
static inline void sysctl_head_put(struct ctl_table_header *head) { }
|
||||
static inline void proc_sys_evict_inode(struct inode *inode,
|
||||
struct ctl_table_header *head) { }
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
|
|
@ -190,6 +190,7 @@ static void init_header(struct ctl_table_header *head,
|
|||
head->set = set;
|
||||
head->parent = NULL;
|
||||
head->node = node;
|
||||
INIT_HLIST_HEAD(&head->inodes);
|
||||
if (node) {
|
||||
struct ctl_table *entry;
|
||||
for (entry = table; entry->procname; entry++, node++)
|
||||
|
@ -259,6 +260,44 @@ static void unuse_table(struct ctl_table_header *p)
|
|||
complete(p->unregistering);
|
||||
}
|
||||
|
||||
static void proc_sys_prune_dcache(struct ctl_table_header *head)
|
||||
{
|
||||
struct inode *inode;
|
||||
struct proc_inode *ei;
|
||||
struct hlist_node *node;
|
||||
struct super_block *sb;
|
||||
|
||||
rcu_read_lock();
|
||||
for (;;) {
|
||||
node = hlist_first_rcu(&head->inodes);
|
||||
if (!node)
|
||||
break;
|
||||
ei = hlist_entry(node, struct proc_inode, sysctl_inodes);
|
||||
spin_lock(&sysctl_lock);
|
||||
hlist_del_init_rcu(&ei->sysctl_inodes);
|
||||
spin_unlock(&sysctl_lock);
|
||||
|
||||
inode = &ei->vfs_inode;
|
||||
sb = inode->i_sb;
|
||||
if (!atomic_inc_not_zero(&sb->s_active))
|
||||
continue;
|
||||
inode = igrab(inode);
|
||||
rcu_read_unlock();
|
||||
if (unlikely(!inode)) {
|
||||
deactivate_super(sb);
|
||||
rcu_read_lock();
|
||||
continue;
|
||||
}
|
||||
|
||||
d_prune_aliases(inode);
|
||||
iput(inode);
|
||||
deactivate_super(sb);
|
||||
|
||||
rcu_read_lock();
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/* called under sysctl_lock, will reacquire if has to wait */
|
||||
static void start_unregistering(struct ctl_table_header *p)
|
||||
{
|
||||
|
@ -272,33 +311,24 @@ static void start_unregistering(struct ctl_table_header *p)
|
|||
p->unregistering = &wait;
|
||||
spin_unlock(&sysctl_lock);
|
||||
wait_for_completion(&wait);
|
||||
spin_lock(&sysctl_lock);
|
||||
} else {
|
||||
/* anything non-NULL; we'll never dereference it */
|
||||
p->unregistering = ERR_PTR(-EINVAL);
|
||||
spin_unlock(&sysctl_lock);
|
||||
}
|
||||
/*
|
||||
* Prune dentries for unregistered sysctls: namespaced sysctls
|
||||
* can have duplicate names and contaminate dcache very badly.
|
||||
*/
|
||||
proc_sys_prune_dcache(p);
|
||||
/*
|
||||
* do not remove from the list until nobody holds it; walking the
|
||||
* list in do_sysctl() relies on that.
|
||||
*/
|
||||
spin_lock(&sysctl_lock);
|
||||
erase_header(p);
|
||||
}
|
||||
|
||||
static void sysctl_head_get(struct ctl_table_header *head)
|
||||
{
|
||||
spin_lock(&sysctl_lock);
|
||||
head->count++;
|
||||
spin_unlock(&sysctl_lock);
|
||||
}
|
||||
|
||||
void sysctl_head_put(struct ctl_table_header *head)
|
||||
{
|
||||
spin_lock(&sysctl_lock);
|
||||
if (!--head->count)
|
||||
kfree_rcu(head, rcu);
|
||||
spin_unlock(&sysctl_lock);
|
||||
}
|
||||
|
||||
static struct ctl_table_header *sysctl_head_grab(struct ctl_table_header *head)
|
||||
{
|
||||
BUG_ON(!head);
|
||||
|
@ -440,10 +470,20 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
|
|||
|
||||
inode->i_ino = get_next_ino();
|
||||
|
||||
sysctl_head_get(head);
|
||||
ei = PROC_I(inode);
|
||||
|
||||
spin_lock(&sysctl_lock);
|
||||
if (unlikely(head->unregistering)) {
|
||||
spin_unlock(&sysctl_lock);
|
||||
iput(inode);
|
||||
inode = NULL;
|
||||
goto out;
|
||||
}
|
||||
ei->sysctl = head;
|
||||
ei->sysctl_entry = table;
|
||||
hlist_add_head_rcu(&ei->sysctl_inodes, &head->inodes);
|
||||
head->count++;
|
||||
spin_unlock(&sysctl_lock);
|
||||
|
||||
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
|
||||
inode->i_mode = table->mode;
|
||||
|
@ -466,6 +506,15 @@ out:
|
|||
return inode;
|
||||
}
|
||||
|
||||
void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head)
|
||||
{
|
||||
spin_lock(&sysctl_lock);
|
||||
hlist_del_init_rcu(&PROC_I(inode)->sysctl_inodes);
|
||||
if (!--head->count)
|
||||
kfree_rcu(head, rcu);
|
||||
spin_unlock(&sysctl_lock);
|
||||
}
|
||||
|
||||
static struct ctl_table_header *grab_header(struct inode *inode)
|
||||
{
|
||||
struct ctl_table_header *head = PROC_I(inode)->sysctl;
|
||||
|
|
|
@ -828,6 +828,19 @@ static inline int pmd_free_pte_page(pmd_t *pmd)
|
|||
struct file;
|
||||
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
|
||||
unsigned long size, pgprot_t *vma_prot);
|
||||
|
||||
#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
|
||||
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool arch_has_pfn_modify_check(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#ifndef io_remap_pfn_range
|
||||
|
|
|
@ -16,6 +16,14 @@
|
|||
*/
|
||||
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
|
||||
|
||||
#undef __no_sanitize_address
|
||||
#define __no_sanitize_address __attribute__((no_sanitize("address")))
|
||||
|
||||
/* Clang doesn't have a way to turn it off per-function, yet. */
|
||||
#ifdef __noretpoline
|
||||
#undef __noretpoline
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_LTO_CLANG
|
||||
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
|
||||
#define __norecordmcount \
|
||||
|
@ -32,8 +40,3 @@
|
|||
#if __has_feature(address_sanitizer)
|
||||
#define __SANITIZE_ADDRESS__
|
||||
#endif
|
||||
|
||||
/* Clang doesn't have a way to turn it off per-function, yet. */
|
||||
#ifdef __noretpoline
|
||||
#undef __noretpoline
|
||||
#endif
|
||||
|
|
|
@ -29,7 +29,7 @@ struct cpu {
|
|||
};
|
||||
|
||||
extern void boot_cpu_init(void);
|
||||
extern void boot_cpu_state_init(void);
|
||||
extern void boot_cpu_hotplug_init(void);
|
||||
|
||||
extern int register_cpu(struct cpu *cpu, int num);
|
||||
extern struct device *get_cpu_device(unsigned cpu);
|
||||
|
@ -52,6 +52,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev,
|
|||
struct device_attribute *attr, char *buf);
|
||||
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
|
||||
struct device_attribute *attr, char *buf);
|
||||
extern ssize_t cpu_show_l1tf(struct device *dev,
|
||||
struct device_attribute *attr, char *buf);
|
||||
|
||||
extern __printf(4, 5)
|
||||
struct device *cpu_device_create(struct device *parent, void *drvdata,
|
||||
|
@ -255,6 +257,25 @@ void cpuhp_report_idle_dead(void);
|
|||
static inline void cpuhp_report_idle_dead(void) { }
|
||||
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
|
||||
|
||||
enum cpuhp_smt_control {
|
||||
CPU_SMT_ENABLED,
|
||||
CPU_SMT_DISABLED,
|
||||
CPU_SMT_FORCE_DISABLED,
|
||||
CPU_SMT_NOT_SUPPORTED,
|
||||
};
|
||||
|
||||
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
|
||||
extern enum cpuhp_smt_control cpu_smt_control;
|
||||
extern void cpu_smt_disable(bool force);
|
||||
extern void cpu_smt_check_topology_early(void);
|
||||
extern void cpu_smt_check_topology(void);
|
||||
#else
|
||||
# define cpu_smt_control (CPU_SMT_ENABLED)
|
||||
static inline void cpu_smt_disable(bool force) { }
|
||||
static inline void cpu_smt_check_topology_early(void) { }
|
||||
static inline void cpu_smt_check_topology(void) { }
|
||||
#endif
|
||||
|
||||
#define IDLE_START 1
|
||||
#define IDLE_END 2
|
||||
|
||||
|
|
|
@ -9,5 +9,7 @@ extern spinlock_t swap_lock;
|
|||
extern struct plist_head swap_active_head;
|
||||
extern struct swap_info_struct *swap_info[];
|
||||
extern int try_to_unuse(unsigned int, bool, unsigned long);
|
||||
extern unsigned long generic_max_swapfile_size(void);
|
||||
extern unsigned long max_swapfile_size(void);
|
||||
|
||||
#endif /* _LINUX_SWAPFILE_H */
|
||||
|
|
|
@ -143,6 +143,7 @@ struct ctl_table_header
|
|||
struct ctl_table_set *set;
|
||||
struct ctl_dir *parent;
|
||||
struct ctl_node *node;
|
||||
struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */
|
||||
};
|
||||
|
||||
struct ctl_dir {
|
||||
|
|
|
@ -3308,6 +3308,20 @@ static inline int ib_check_mr_access(int flags)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static inline bool ib_access_writable(int access_flags)
|
||||
{
|
||||
/*
|
||||
* We have writable memory backing the MR if any of the following
|
||||
* access flags are set. "Local write" and "remote write" obviously
|
||||
* require write access. "Remote atomic" can do things like fetch and
|
||||
* add, which will modify memory, and "MW bind" can change permissions
|
||||
* by binding a window.
|
||||
*/
|
||||
return access_flags &
|
||||
(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
|
||||
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
|
||||
}
|
||||
|
||||
/**
|
||||
* ib_check_mr_status: lightweight check of MR status.
|
||||
* This routine may provide status checks on a selected
|
||||
|
|
|
@ -717,6 +717,7 @@ struct kvm_ppc_smmu_info {
|
|||
#define KVM_TRACE_PAUSE __KVM_DEPRECATED_MAIN_0x07
|
||||
#define KVM_TRACE_DISABLE __KVM_DEPRECATED_MAIN_0x08
|
||||
#define KVM_GET_EMULATED_CPUID _IOWR(KVMIO, 0x09, struct kvm_cpuid2)
|
||||
#define KVM_GET_MSR_FEATURE_INDEX_LIST _IOWR(KVMIO, 0x0a, struct kvm_msr_list)
|
||||
|
||||
/*
|
||||
* Extension capability list.
|
||||
|
@ -871,6 +872,7 @@ struct kvm_ppc_smmu_info {
|
|||
#define KVM_CAP_MSI_DEVID 131
|
||||
#define KVM_CAP_PPC_HTM 132
|
||||
#define KVM_CAP_S390_BPB 152
|
||||
#define KVM_CAP_GET_MSR_FEATURES 153
|
||||
|
||||
#ifdef KVM_CAP_IRQ_ROUTING
|
||||
|
||||
|
|
|
@ -509,8 +509,8 @@ asmlinkage __visible void __init start_kernel(void)
|
|||
setup_command_line(command_line);
|
||||
setup_nr_cpu_ids();
|
||||
setup_per_cpu_areas();
|
||||
boot_cpu_state_init();
|
||||
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
|
||||
boot_cpu_hotplug_init();
|
||||
|
||||
build_all_zonelists(NULL, NULL);
|
||||
page_alloc_init();
|
||||
|
|
kernel/cpu.c
|
@ -54,6 +54,7 @@ struct cpuhp_cpu_state {
|
|||
bool rollback;
|
||||
bool single;
|
||||
bool bringup;
|
||||
bool booted_once;
|
||||
struct hlist_node *node;
|
||||
enum cpuhp_state cb_state;
|
||||
int result;
|
||||
|
@ -355,6 +356,85 @@ void cpu_hotplug_enable(void)
|
|||
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
|
||||
#endif /* CONFIG_HOTPLUG_CPU */
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_SMT
|
||||
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
|
||||
EXPORT_SYMBOL_GPL(cpu_smt_control);
|
||||
|
||||
static bool cpu_smt_available __read_mostly;
|
||||
|
||||
void __init cpu_smt_disable(bool force)
|
||||
{
|
||||
if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
|
||||
cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
|
||||
return;
|
||||
|
||||
if (force) {
|
||||
pr_info("SMT: Force disabled\n");
|
||||
cpu_smt_control = CPU_SMT_FORCE_DISABLED;
|
||||
} else {
|
||||
cpu_smt_control = CPU_SMT_DISABLED;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* The decision whether SMT is supported can only be done after the full
|
||||
* CPU identification. Called from architecture code before non boot CPUs
|
||||
* are brought up.
|
||||
*/
|
||||
void __init cpu_smt_check_topology_early(void)
|
||||
{
|
||||
if (!topology_smt_supported())
|
||||
cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
/*
|
||||
* If SMT was disabled by BIOS, detect it here, after the CPUs have been
|
||||
* brought online. This ensures the smt/l1tf sysfs entries are consistent
|
||||
* with reality. cpu_smt_available is set to true during the bringup of non
|
||||
* boot CPUs when a SMT sibling is detected. Note, this may overwrite
|
||||
* cpu_smt_control's previous setting.
|
||||
*/
|
||||
void __init cpu_smt_check_topology(void)
|
||||
{
|
||||
if (!cpu_smt_available)
|
||||
cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
static int __init smt_cmdline_disable(char *str)
|
||||
{
|
||||
cpu_smt_disable(str && !strcmp(str, "force"));
|
||||
return 0;
|
||||
}
|
||||
early_param("nosmt", smt_cmdline_disable);
|
||||
|
||||
static inline bool cpu_smt_allowed(unsigned int cpu)
|
||||
{
|
||||
if (topology_is_primary_thread(cpu))
|
||||
return true;
|
||||
|
||||
/*
|
||||
* If the CPU is not a 'primary' thread and the booted_once bit is
|
||||
* set then the processor has SMT support. Store this information
|
||||
* for the late check of SMT support in cpu_smt_check_topology().
|
||||
*/
|
||||
if (per_cpu(cpuhp_state, cpu).booted_once)
|
||||
cpu_smt_available = true;
|
||||
|
||||
if (cpu_smt_control == CPU_SMT_ENABLED)
|
||||
return true;
|
||||
|
||||
/*
|
||||
* On x86 it's required to boot all logical CPUs at least once so
|
||||
* that the init code can get a chance to set CR4.MCE on each
|
||||
* CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
|
||||
* core will shutdown the machine.
|
||||
*/
|
||||
return !per_cpu(cpuhp_state, cpu).booted_once;
|
||||
}
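
Putting cpu_smt_allowed() together: a primary thread is always allowed, a secondary thread is allowed while SMT control is enabled, and when SMT is disabled a sibling that has never been online may still be brought up once so it can set CR4.MCE before being taken back down. A hedged restatement as a pure function, with simplified names and a simplified enum:

#include <stdbool.h>

enum smt_control_model { SMT_ON, SMT_OFF, SMT_FORCE_OFF, SMT_NOT_SUPPORTED };

bool smt_bringup_allowed(bool is_primary_thread, bool booted_once,
			 enum smt_control_model ctrl)
{
	if (is_primary_thread)
		return true;			/* primaries are never gated */
	if (ctrl == SMT_ON)
		return true;			/* SMT enabled: everything boots */
	/*
	 * SMT disabled: allow one initial bringup of a sibling that has
	 * never been online, so it can set CR4.MCE, then keep it offline.
	 */
	return !booted_once;
}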
|
||||
#else
|
||||
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
|
||||
#endif
|
||||
|
||||
/* Need to know about CPUs going up/down? */
|
||||
int register_cpu_notifier(struct notifier_block *nb)
|
||||
{
|
||||
|
@ -431,6 +511,16 @@ static int bringup_wait_for_ap(unsigned int cpu)
|
|||
stop_machine_unpark(cpu);
|
||||
kthread_unpark(st->thread);
|
||||
|
||||
/*
|
||||
* SMT soft disabling on X86 requires to bring the CPU out of the
|
||||
* BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
|
||||
* CPU marked itself as booted_once in cpu_notify_starting() so the
|
||||
* cpu_smt_allowed() check will now return false if this is not the
|
||||
* primary sibling.
|
||||
*/
|
||||
if (!cpu_smt_allowed(cpu))
|
||||
return -ECANCELED;
|
||||
|
||||
/* Should we go further up ? */
|
||||
if (st->target > CPUHP_AP_ONLINE_IDLE) {
|
||||
__cpuhp_kick_ap_work(st);
|
||||
|
@@ -817,7 +907,6 @@ static int takedown_cpu(unsigned int cpu)
 
         /* Park the smpboot threads */
         kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
-        smpboot_park_threads(cpu);
 
         /*
          * Prevent irq alloc/free while the dying cpu reorganizes the
@@ -956,20 +1045,19 @@ out:
         return ret;
 }
 
+static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
+{
+        if (cpu_hotplug_disabled)
+                return -EBUSY;
+        return _cpu_down(cpu, 0, target);
+}
+
 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
 {
         int err;
 
         cpu_maps_update_begin();
-
-        if (cpu_hotplug_disabled) {
-                err = -EBUSY;
-                goto out;
-        }
-
-        err = _cpu_down(cpu, 0, target);
-
-out:
+        err = cpu_down_maps_locked(cpu, target);
         cpu_maps_update_done();
         return err;
 }
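The split above follows the usual *_maps_locked convention: do_cpu_down() still takes cpu_maps_update_begin()/cpu_maps_update_done() itself, while cpu_down_maps_locked() expects the caller to already hold the cpu maps lock, which is exactly what cpuhp_smt_disable() later in this diff relies on. A minimal sketch of that convention, with hypothetical names (foo/foo_locked, maps_lock), not kernel code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t maps_lock = PTHREAD_MUTEX_INITIALIZER;

/* Variant for callers that already hold maps_lock (cf. cpu_down_maps_locked). */
static int foo_locked(int arg)
{
        printf("working on %d with the lock held by the caller\n", arg);
        return 0;
}

/* Public entry point: takes and drops the lock itself (cf. do_cpu_down). */
static int foo(int arg)
{
        int err;

        pthread_mutex_lock(&maps_lock);
        err = foo_locked(arg);
        pthread_mutex_unlock(&maps_lock);
        return err;
}

int main(void)
{
        return foo(1);
}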
@@ -993,6 +1081,7 @@ void notify_cpu_starting(unsigned int cpu)
         enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
 
         rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
+        st->booted_once = true;
         while (st->state < target) {
                 st->state++;
                 cpuhp_invoke_callback(cpu, st->state, true, NULL);
@@ -1098,6 +1187,10 @@ static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
                 err = -EBUSY;
                 goto out;
         }
+        if (!cpu_smt_allowed(cpu)) {
+                err = -EPERM;
+                goto out;
+        }
 
         err = _cpu_up(cpu, 0, target);
 out:
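With the check added above, onlining a non-primary sibling through the regular per-CPU sysfs interface fails with EPERM once SMT control is off. A hedged userspace sketch (cpu3 is only an assumed sibling; pick a real one from /sys/devices/system/cpu/cpuN/topology/thread_siblings_list):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Assumed SMT sibling; adjust for the local topology. */
        const char *path = "/sys/devices/system/cpu/cpu3/online";
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (write(fd, "1", 1) < 0)
                printf("online refused: %s\n", strerror(errno)); /* EPERM when SMT is off */
        else
                printf("cpu onlined\n");
        close(fd);
        return 0;
}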
@@ -1396,7 +1489,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
         [CPUHP_AP_SMPBOOT_THREADS] = {
                 .name = "smpboot/threads:online",
                 .startup.single = smpboot_unpark_threads,
-                .teardown.single = NULL,
+                .teardown.single = smpboot_park_threads,
         },
         [CPUHP_AP_PERF_ONLINE] = {
                 .name = "perf:online",
@@ -1851,10 +1944,172 @@ static struct attribute_group cpuhp_cpu_root_attr_group = {
         NULL
 };
 
+#ifdef CONFIG_HOTPLUG_SMT
+
+static const char *smt_states[] = {
+        [CPU_SMT_ENABLED] = "on",
+        [CPU_SMT_DISABLED] = "off",
+        [CPU_SMT_FORCE_DISABLED] = "forceoff",
+        [CPU_SMT_NOT_SUPPORTED] = "notsupported",
+};
+
+static ssize_t
+show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
+{
+        return snprintf(buf, PAGE_SIZE - 2, "%s\n", smt_states[cpu_smt_control]);
+}
+
+static void cpuhp_offline_cpu_device(unsigned int cpu)
+{
+        struct device *dev = get_cpu_device(cpu);
+
+        dev->offline = true;
+        /* Tell user space about the state change */
+        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+}
+
+static void cpuhp_online_cpu_device(unsigned int cpu)
+{
+        struct device *dev = get_cpu_device(cpu);
+
+        dev->offline = false;
+        /* Tell user space about the state change */
+        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
+static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+{
+        int cpu, ret = 0;
+
+        cpu_maps_update_begin();
+        for_each_online_cpu(cpu) {
+                if (topology_is_primary_thread(cpu))
+                        continue;
+                ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+                if (ret)
+                        break;
+                /*
+                 * As this needs to hold the cpu maps lock it's impossible
+                 * to call device_offline() because that ends up calling
+                 * cpu_down() which takes cpu maps lock. cpu maps lock
+                 * needs to be held as this might race against in kernel
+                 * abusers of the hotplug machinery (thermal management).
+                 *
+                 * So nothing would update device:offline state. That would
+                 * leave the sysfs entry stale and prevent onlining after
+                 * smt control has been changed to 'off' again. This is
+                 * called under the sysfs hotplug lock, so it is properly
+                 * serialized against the regular offline usage.
+                 */
+                cpuhp_offline_cpu_device(cpu);
+        }
+        if (!ret)
+                cpu_smt_control = ctrlval;
+        cpu_maps_update_done();
+        return ret;
+}
+
+static int cpuhp_smt_enable(void)
+{
+        int cpu, ret = 0;
+
+        cpu_maps_update_begin();
+        cpu_smt_control = CPU_SMT_ENABLED;
+        for_each_present_cpu(cpu) {
+                /* Skip online CPUs and CPUs on offline nodes */
+                if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+                        continue;
+                ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+                if (ret)
+                        break;
+                /* See comment in cpuhp_smt_disable() */
+                cpuhp_online_cpu_device(cpu);
+        }
+        cpu_maps_update_done();
+        return ret;
+}
+
+static ssize_t
+store_smt_control(struct device *dev, struct device_attribute *attr,
+                  const char *buf, size_t count)
+{
+        int ctrlval, ret;
+
+        if (sysfs_streq(buf, "on"))
+                ctrlval = CPU_SMT_ENABLED;
+        else if (sysfs_streq(buf, "off"))
+                ctrlval = CPU_SMT_DISABLED;
+        else if (sysfs_streq(buf, "forceoff"))
+                ctrlval = CPU_SMT_FORCE_DISABLED;
+        else
+                return -EINVAL;
+
+        if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
+                return -EPERM;
+
+        if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
+                return -ENODEV;
+
+        ret = lock_device_hotplug_sysfs();
+        if (ret)
+                return ret;
+
+        if (ctrlval != cpu_smt_control) {
+                switch (ctrlval) {
+                case CPU_SMT_ENABLED:
+                        ret = cpuhp_smt_enable();
+                        break;
+                case CPU_SMT_DISABLED:
+                case CPU_SMT_FORCE_DISABLED:
+                        ret = cpuhp_smt_disable(ctrlval);
+                        break;
+                }
+        }
+
+        unlock_device_hotplug();
+        return ret ? ret : count;
+}
+static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
+
+static ssize_t
+show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
+{
+        bool active = topology_max_smt_threads() > 1;
+
+        return snprintf(buf, PAGE_SIZE - 2, "%d\n", active);
+}
+static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
+
+static struct attribute *cpuhp_smt_attrs[] = {
+        &dev_attr_control.attr,
+        &dev_attr_active.attr,
+        NULL
+};
+
+static const struct attribute_group cpuhp_smt_attr_group = {
+        .attrs = cpuhp_smt_attrs,
+        .name = "smt",
+        NULL
+};
+
+static int __init cpu_smt_state_init(void)
+{
+        return sysfs_create_group(&cpu_subsys.dev_root->kobj,
+                                  &cpuhp_smt_attr_group);
+}
+
+#else
+static inline int cpu_smt_state_init(void) { return 0; }
+#endif
+
 static int __init cpuhp_sysfs_init(void)
 {
         int cpu, ret;
 
+        ret = cpu_smt_state_init();
+        if (ret)
+                return ret;
+
         ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
                                  &cpuhp_cpu_root_attr_group);
         if (ret)
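The hunk above creates a new sysfs group, so the controls land in /sys/devices/system/cpu/smt/control (accepting the same "on"/"off"/"forceoff" strings store_smt_control() parses) and /sys/devices/system/cpu/smt/active. A small userspace sketch that reads both files and, when run as root, switches SMT off:

#include <stdio.h>

static void show(const char *path)
{
        char buf[64] = "";
        FILE *f = fopen(path, "r");

        if (f && fgets(buf, sizeof(buf), f))
                printf("%s: %s", path, buf);
        if (f)
                fclose(f);
}

int main(void)
{
        show("/sys/devices/system/cpu/smt/control");   /* on/off/forceoff/notsupported */
        show("/sys/devices/system/cpu/smt/active");    /* 1 while siblings are active */

        /* Writing requires root; "off" offlines all secondary siblings. */
        FILE *f = fopen("/sys/devices/system/cpu/smt/control", "w");

        if (f) {
                fputs("off", f);
                fclose(f);
        }
        return 0;
}

Note the design choice visible in store_smt_control(): once "forceoff" has been set, further writes are rejected with -EPERM, so force-disabled SMT sticks until reboot.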
@@ -1951,9 +2206,12 @@ void __init boot_cpu_init(void)
 /*
  * Must be called _AFTER_ setting up the per_cpu areas
  */
-void __init boot_cpu_state_init(void)
+void __init boot_cpu_hotplug_init(void)
 {
-        per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
+#ifdef CONFIG_SMP
+        this_cpu_write(cpuhp_state.booted_once, true);
+#endif
+        this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
 }
 
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Add a link
Reference in a new issue