/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/hypervisor.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/e820.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
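
/*
 * Boot-time entry point for bug detection and mitigation selection,
 * invoked from start_kernel() once the boot CPU has been identified.
 */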
void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology_early();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

	mds_select_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for a i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}
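
/*
 * x86_virt_spec_ctrl - update speculation control MSRs across a
 * guest/host transition
 * @guest_spec_ctrl:       guest content of MSR_SPEC_CTRL
 * @guest_virt_spec_ctrl:  guest controlled bits of MSR_VIRT_SPEC_CTRL
 * @setguest:              true when entering the guest, false when
 *                         restoring the host state
 *
 * The MSRs are only written when the host and guest values differ.
 */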
void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}

	pr_info("%s\n", mds_strings[mds_mitigation]);
}
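
/*
 * Parse the "mds=" boot parameter; it accepts "off", "full" and
 * "full,nosmt", mirroring the handling below.
 */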
static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]		= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]	= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_PRCTL]		= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]	= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}
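
/*
 * Map the spectre_v2_user= command line option to a user space mitigation
 * command, short-circuiting when the main spectre_v2= selection already
 * forces the mitigation fully off or fully on.
 */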
static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/* If enhanced IBRS is enabled no STIBP required */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	/*
	 * If SMT is not possible or STIBP is not available clear the STIBP
	 * mode.
	 */
	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
		mode = SPECTRE_V2_USER_NONE;
set_mode:
	spectre_v2_user = mode;
	/* Only print the STIBP mode when SMT possible */
	if (smt_possible)
		pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
	return __is_defined(RETPOLINE);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_IBRS_ENHANCED;
			/* Force it so VMEXIT will restore correctly */
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			goto specv2_set_mode;
		}
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
					 SPECTRE_V2_RETPOLINE_MINIMAL;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

specv2_set_mode:
	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);
}

static void update_stibp_msr(void *__unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active())
		static_branch_enable(&mds_idle_clear);
	else
		static_branch_disable(&mds_idle_clear);
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
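
/*
 * Re-evaluate the SMT-state dependent mitigations (strict STIBP,
 * conditional indirect branch speculation and MDS idle clearing)
 * whenever the scheduler's view of SMT changes.
 */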
void arch_smt_update(void)
{
	/* Enhanced IBRS implies STIBP. No update required. */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	mutex_lock(&spec_ctrl_mutex);

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
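
/*
 * Parse the "nospec_store_bypass_disable" switch and the
 * "spec_store_bypass_disable=" option from the kernel command line.
 */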
static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
	 * bit in the mask to allow guests to use the mitigation even in the
	 * case where the host does not enable it.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
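
/*
 * Per-task control of indirect branch speculation via STIBP and IBPB is
 * exposed through the PR_SPEC_INDIRECT_BRANCH option of the
 * PR_GET_SPECULATION_CTRL and PR_SET_SPECULATION_CTRL prctls, e.g.:
 *
 *   prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
 *   prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
 *
 * See Documentation/userspace-api/spec_ctrl.rst.
 */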
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return 0;
		/*
		 * Indirect branch speculation is always disabled in strict
		 * mode.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
			return -EPERM;
		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return -EPERM;
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
			return 0;
		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_STRICT:
		return PR_SPEC_DISABLE;
	default:
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	default:
		return -ENODEV;
	}
}
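
/*
 * Called during secondary CPU bringup to replicate the boot CPU's
 * SPEC_CTRL MSR contents and SSBD mitigation state on the AP.
 */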
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

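/*
 * The "l1tf=" command line option selects the mitigation mode: "full"
 * (disable SMT and enable all hypervisor mitigations), "full,force"
 * (same, but with runtime SMT control disabled), "flush" (conditional
 * hypervisor mitigation with SMT left enabled - the default),
 * "flush,nosmt", "flush,nowarn" and "off". KVM can override the L1D
 * flush behavior with its 'vmentry_l1d_flush' module parameter except
 * when l1tf=full,force is set.
 */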
/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
2018-08-15 08:38:33 -07:00
#endif
2018-07-13 16:23:22 +02:00
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2018-07-13 16:23:16 +02:00
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
2018-08-24 10:03:50 -07:00
/*
 * These CPUs all support a 44-bit physical address space internally in
 * the cache, but CPUID can report a smaller number of physical address
 * bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * that report 36 physical address bits and have 32G RAM installed, the
 * mitigation range check in l1tf_select_mitigation() triggers. This is
 * a false positive because the mitigation is still possible: the cache
 * uses 44 bits internally. Use the cache bits instead of the reported
 * physical bits, and adjust them on the affected machines to 44 bits
 * if the reported value is less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}
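As a standalone hedged sketch (not kernel code) of the arithmetic behind the comment above: the range check compares installed RAM against half of the physical address space, so the reported 36 bits yield a 32G boundary, exactly the installed-RAM size that produced the false positive, while the 44 bits the cache really uses push that boundary to 8T.

#include <stdio.h>

int main(void)
{
	/* half_pa is roughly 2^(address bits - 1); see l1tf_select_mitigation() below */
	unsigned long long half_36 = 1ULL << (36 - 1);	/* 32G */
	unsigned long long half_44 = 1ULL << (44 - 1);	/*  8T */

	printf("half_pa with 36 reported bits: %lluG\n", half_36 >> 30);	/* 32 */
	printf("half_pa with 44 cache bits:    %lluT\n", half_44 >> 40);	/*  8 */
	return 0;
}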
2018-06-20 16:42:57 -04:00
static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;
2019-04-12 15:39:29 -05:00
	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2018-08-24 10:03:50 -07:00
	override_cache_bits(&boot_cpu_data);
2018-07-13 16:23:25 +02:00
	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}
2018-06-20 16:42:57 -04:00
#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2018-11-13 19:49:10 +01:00
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
	    e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
2018-06-20 16:42:57 -04:00
pr_warn ( " System has more than MAX_PA/2 memory. L1TF mitigation not effective. \n " ) ;
2018-08-23 16:21:29 +02:00
pr_info ( " You may make it effective by booting the kernel with mem=%llu parameter. \n " ,
half_pa ) ;
pr_info ( " However, doing so will make a part of your RAM unusable. \n " ) ;
2019-02-19 11:10:49 +01:00
pr_info ( " Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide. \n " ) ;
2018-06-20 16:42:57 -04:00
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
2018-07-13 16:23:25 +02:00
static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);
2018-06-20 16:42:57 -04:00
#undef pr_fmt
2019-04-02 10:00:51 -05:00
#define pr_fmt(fmt)	fmt
2018-06-20 16:42:57 -04:00
2018-01-07 22:48:01 +01:00
#ifdef CONFIG_SYSFS
2018-04-25 22:04:17 -04:00
2018-07-13 16:23:16 +02:00
#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
2018-11-25 19:33:42 +01:00
static const char * const l1tf_vmx_states[] = {
2018-07-13 16:23:18 +02:00
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
2018-08-05 16:07:46 +02:00
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
2018-07-13 16:23:16 +02:00
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
2018-08-05 16:07:45 +02:00
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
2018-11-25 19:33:40 +01:00
	     sched_smt_active())) {
2018-08-05 16:07:45 +02:00
return sprintf ( buf , " %s; VMX: %s \n " , L1TF_DEFAULT_MSG ,
l1tf_vmx_states [ l1tf_vmx_mitigation ] ) ;
2018-11-25 19:33:40 +01:00
}
2018-08-05 16:07:45 +02:00
return sprintf ( buf , " %s; VMX: %s, SMT %s \n " , L1TF_DEFAULT_MSG ,
l1tf_vmx_states [ l1tf_vmx_mitigation ] ,
2018-11-25 19:33:40 +01:00
sched_smt_active ( ) ? " vulnerable " : " disabled " ) ;
2018-07-13 16:23:16 +02:00
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif
2019-02-18 22:51:43 +01:00
static ssize_t mds_show_state(char *buf)
{
#ifdef CONFIG_HYPERVISOR_GUEST
2019-07-25 10:39:09 +08:00
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2019-02-18 22:51:43 +01:00
return sprintf ( buf , " %s; SMT Host state unknown \n " ,
mds_strings [ mds_mitigation ] ) ;
}
# endif
if ( boot_cpu_has ( X86_BUG_MSBDS_ONLY ) ) {
return sprintf ( buf , " %s; SMT %s \n " , mds_strings [ mds_mitigation ] ,
2019-04-12 17:50:58 -04:00
			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
			        sched_smt_active() ? "mitigated" : "disabled"));
2019-02-18 22:51:43 +01:00
}
return sprintf ( buf , " %s; SMT %s \n " , mds_strings [ mds_mitigation ] ,
sched_smt_active ( ) ? " vulnerable " : " disabled " ) ;
}
2018-11-25 19:33:32 +01:00
static char *stibp_state(void)
{
2018-11-25 19:33:33 +01:00
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return "";
2018-11-25 19:33:45 +01:00
	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
x86/speculation: Add prctl() control for indirect branch speculation
commit 9137bb27e60e554dab694eafa4cca241fa3a694f upstream.
Add the PR_SPEC_INDIRECT_BRANCH option for the PR_GET_SPECULATION_CTRL and
PR_SET_SPECULATION_CTRL prctls to allow fine-grained per-task control of
indirect branch speculation via STIBP and IBPB.
Invocations:
Check indirect branch speculation status with
- prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
Enable indirect branch speculation with
- prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_ENABLE, 0, 0);
Disable indirect branch speculation with
- prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_DISABLE, 0, 0);
Force disable indirect branch speculation with
- prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, PR_SPEC_FORCE_DISABLE, 0, 0);
See Documentation/userspace-api/spec_ctrl.rst.
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Casey Schaufler <casey.schaufler@intel.com>
Cc: Asit Mallick <asit.k.mallick@intel.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Jon Masters <jcm@redhat.com>
Cc: Waiman Long <longman9394@gmail.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Dave Stewart <david.c.stewart@intel.com>
Cc: Kees Cook <keescook@chromium.org>
Link: https://lkml.kernel.org/r/20181125185005.866780996@linutronix.de
[bwh: Backported to 4.9:
- Renumber the PFA flags
- Drop changes in tools/include/uapi/linux/prctl.h
- Adjust filename]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2018-11-25 19:33:53 +01:00
	case SPECTRE_V2_USER_PRCTL:
2018-11-25 19:33:55 +01:00
	case SPECTRE_V2_USER_SECCOMP:
2018-11-25 19:33:54 +01:00
if ( static_key_enabled ( & switch_to_cond_stibp ) )
return " , STIBP: conditional " ;
2018-11-25 19:33:45 +01:00
	}

	return "";
2018-11-25 19:33:32 +01:00
}
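The prctl interface described in the commit message above can be exercised from userspace. A minimal hedged sketch (standard C; assumes <linux/prctl.h> from a kernel carrying this backport, which defines PR_SPEC_INDIRECT_BRANCH and the PR_SPEC_* flags):

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Query the indirect branch speculation state of this task. */
	int state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);

	if (state < 0) {
		perror("PR_GET_SPECULATION_CTRL");	/* e.g. EINVAL on older kernels */
		return 1;
	}
	printf("indirect branch speculation flags: 0x%x\n", state);

	/* Opt this task into STIBP/IBPB protection by disabling speculation. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_DISABLE, 0, 0) < 0)
		perror("PR_SET_SPECULATION_CTRL");
	return 0;
}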
static char *ibpb_state(void)
{
2018-11-25 19:33:49 +01:00
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
2018-11-25 19:33:54 +01:00
if ( static_key_enabled ( & switch_mm_always_ibpb ) )
2018-11-25 19:33:49 +01:00
return " , IBPB: always-on " ;
2018-11-25 19:33:54 +01:00
if ( static_key_enabled ( & switch_mm_cond_ibpb ) )
return " , IBPB: conditional " ;
return " , IBPB: disabled " ;
2018-11-25 19:33:49 +01:00
	}

	return "";
2018-11-25 19:33:32 +01:00
}
2018-05-10 22:47:32 +02:00
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
2018-05-11 16:50:35 -04:00
			       char *buf, unsigned int bug)
2018-01-07 22:48:01 +01:00
{
2018-04-25 22:04:17 -04:00
	if (!boot_cpu_has_bug(bug))
2018-01-07 22:48:01 +01:00
return sprintf ( buf , " Not affected \n " ) ;
2018-04-25 22:04:17 -04:00
	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_KAISER))
			return sprintf(buf, "Mitigation: PTI\n");
		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
2018-11-25 19:33:31 +01:00
return sprintf ( buf , " %s%s%s%s%s%s \n " , spectre_v2_strings [ spectre_v2_enabled ] ,
2018-11-25 19:33:32 +01:00
			       ibpb_state(),
2018-04-25 22:04:17 -04:00
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
2018-11-25 19:33:32 +01:00
			       stibp_state(),
2018-09-25 14:39:28 +02:00
			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
2018-04-25 22:04:17 -04:00
			       spectre_v2_module_string());
2018-04-25 22:04:21 -04:00
	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
2018-06-13 15:48:26 -07:00
	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
2018-07-13 16:23:16 +02:00
			return l1tf_show_state(buf);
2018-06-13 15:48:26 -07:00
		break;
2019-02-18 22:51:43 +01:00
	case X86_BUG_MDS:
		return mds_show_state(buf);
2018-04-25 22:04:17 -04:00
	default:
		break;
	}
2018-01-07 22:48:01 +01:00
return sprintf ( buf , " Vulnerable \n " ) ;
}
2018-04-25 22:04:17 -04:00
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}
2018-02-13 09:03:08 +01:00
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
2018-01-07 22:48:01 +01:00
{
2018-04-25 22:04:17 -04:00
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
2018-01-07 22:48:01 +01:00
}
2018-02-13 09:03:08 +01:00
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
2018-01-07 22:48:01 +01:00
{
2018-04-25 22:04:17 -04:00
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
2018-01-07 22:48:01 +01:00
}
2018-04-25 22:04:20 -04:00
ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
2018-06-13 15:48:26 -07:00
ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
2019-02-18 22:51:43 +01:00
ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}
2018-01-07 22:48:01 +01:00
#endif