Merge 4.9.216 branch 'android-4.9-q' into tw10-android-4.9-q

Conflicts:
	drivers/scsi/ufs/ufshcd.c - checked out '39f4ec1ef6'; a similar fix was found in the source
	drivers/staging/android/ashmem.c
	drivers/usb/gadget/function/u_serial.c
	fs/ext4/ext4.h
	security/selinux/avc.c

commit fa3b3e9fc6

448 changed files with 3962 additions and 1699 deletions
@@ -8606,6 +8606,12 @@ S: Maintained
F: Documentation/scsi/NinjaSCSI.txt
F: drivers/scsi/nsp32*

NINTENDO HID DRIVER
M: Daniel J. Ogorchock <djogorchock@gmail.com>
L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/hid-nintendo*

NIOS2 ARCHITECTURE
M: Ley Foon Tan <lftan@altera.com>
L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
Makefile (2 lines changed)
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
SUBLEVEL = 212
SUBLEVEL = 216
EXTRAVERSION =
NAME = Roaring Lionus
@@ -63,6 +63,7 @@
interrupt-names = "macirq";
phy-mode = "rgmii";
snps,pbl = < 32 >;
snps,multicast-filter-bins = <256>;
clocks = <&apbclk>;
clock-names = "stmmaceth";
max-speed = <100>;

@@ -7,7 +7,7 @@ menuconfig ARC_PLAT_EZNPS
bool "\"EZchip\" ARC dev platform"
select ARC_HAS_COH_CACHES if SMP
select CPU_BIG_ENDIAN
select CLKSRC_NPS
select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
select EZNPS_GIC
select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET
help

@@ -2044,7 +2044,7 @@ config XIP_PHYS_ADDR
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on (!SMP || PM_SLEEP_SMP)
depends on !CPU_V7M
depends on MMU
select KEXEC_CORE
help
kexec is a system call that implements the ability to shutdown your

@@ -27,6 +27,27 @@
reg = <0x0 0x80000000 0x0 0x80000000>;
};

main_12v0: fixedregulator-main_12v0 {
/* main supply */
compatible = "regulator-fixed";
regulator-name = "main_12v0";
regulator-min-microvolt = <12000000>;
regulator-max-microvolt = <12000000>;
regulator-always-on;
regulator-boot-on;
};

evm_5v0: fixedregulator-evm_5v0 {
/* Output of TPS54531D */
compatible = "regulator-fixed";
regulator-name = "evm_5v0";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
vin-supply = <&main_12v0>;
regulator-always-on;
regulator-boot-on;
};

vdd_3v3: fixedregulator-vdd_3v3 {
compatible = "regulator-fixed";
regulator-name = "vdd_3v3";

@@ -505,7 +505,7 @@
};

mdio0: mdio@2d24000 {
compatible = "fsl,etsec2-mdio";
compatible = "gianfar";
device_type = "mdio";
#address-cells = <1>;
#size-cells = <0>;

@@ -513,7 +513,7 @@
};

mdio1: mdio@2d64000 {
compatible = "fsl,etsec2-mdio";
compatible = "gianfar";
device_type = "mdio";
#address-cells = <1>;
#size-cells = <0>;

@@ -67,6 +67,14 @@
<0xf0000100 0x100>;
};

timer@f0000200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0xf0000200 0x100>;
interrupts = <GIC_PPI 11
(GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING)>;
clocks = <&cpg_clocks R8A7779_CLK_ZS>;
};

timer@f0000600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0xf0000600 0x20>;
@@ -1109,49 +1109,49 @@
usart0_clk: usart0_clk {
#clock-cells = <0>;
reg = <12>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};

usart1_clk: usart1_clk {
#clock-cells = <0>;
reg = <13>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};

usart2_clk: usart2_clk {
#clock-cells = <0>;
reg = <14>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};

usart3_clk: usart3_clk {
#clock-cells = <0>;
reg = <15>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};

uart0_clk: uart0_clk {
#clock-cells = <0>;
reg = <16>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};

twi0_clk: twi0_clk {
reg = <18>;
#clock-cells = <0>;
atmel,clk-output-range = <0 16625000>;
atmel,clk-output-range = <0 41500000>;
};

twi1_clk: twi1_clk {
#clock-cells = <0>;
reg = <19>;
atmel,clk-output-range = <0 16625000>;
atmel,clk-output-range = <0 41500000>;
};

twi2_clk: twi2_clk {
#clock-cells = <0>;
reg = <20>;
atmel,clk-output-range = <0 16625000>;
atmel,clk-output-range = <0 41500000>;
};

mci0_clk: mci0_clk {

@@ -1167,19 +1167,19 @@
spi0_clk: spi0_clk {
#clock-cells = <0>;
reg = <24>;
atmel,clk-output-range = <0 133000000>;
atmel,clk-output-range = <0 166000000>;
};

spi1_clk: spi1_clk {
#clock-cells = <0>;
reg = <25>;
atmel,clk-output-range = <0 133000000>;
atmel,clk-output-range = <0 166000000>;
};

tcb0_clk: tcb0_clk {
#clock-cells = <0>;
reg = <26>;
atmel,clk-output-range = <0 133000000>;
atmel,clk-output-range = <0 166000000>;
};

pwm_clk: pwm_clk {

@@ -1190,7 +1190,7 @@
adc_clk: adc_clk {
#clock-cells = <0>;
reg = <29>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};

dma0_clk: dma0_clk {

@@ -1221,13 +1221,13 @@
ssc0_clk: ssc0_clk {
#clock-cells = <0>;
reg = <38>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};

ssc1_clk: ssc1_clk {
#clock-cells = <0>;
reg = <39>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};

sha_clk: sha_clk {

@@ -37,13 +37,13 @@
can0_clk: can0_clk {
#clock-cells = <0>;
reg = <40>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};

can1_clk: can1_clk {
#clock-cells = <0>;
reg = <41>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
};
};

@@ -23,6 +23,7 @@
tcb1_clk: tcb1_clk {
#clock-cells = <0>;
reg = <27>;
atmel,clk-output-range = <0 166000000>;
};
};
};

@@ -42,13 +42,13 @@
uart0_clk: uart0_clk {
#clock-cells = <0>;
reg = <16>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};

uart1_clk: uart1_clk {
#clock-cells = <0>;
reg = <17>;
atmel,clk-output-range = <0 66000000>;
atmel,clk-output-range = <0 83000000>;
};
};
};
@@ -86,6 +86,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
endif
AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
obj-$(CONFIG_SOC_IMX6) += pm-imx6.o

obj-$(CONFIG_SOC_IMX1) += mach-imx1.o

@@ -112,17 +112,17 @@ void imx_cpu_die(unsigned int cpu);
int imx_cpu_kill(unsigned int cpu);

#ifdef CONFIG_SUSPEND
void v7_cpu_resume(void);
void imx53_suspend(void __iomem *ocram_vbase);
extern const u32 imx53_suspend_sz;
void imx6_suspend(void __iomem *ocram_vbase);
#else
static inline void v7_cpu_resume(void) {}
static inline void imx53_suspend(void __iomem *ocram_vbase) {}
static const u32 imx53_suspend_sz;
static inline void imx6_suspend(void __iomem *ocram_vbase) {}
#endif

void v7_cpu_resume(void);

void imx6_pm_ccm_init(const char *ccm_compat);
void imx6q_pm_init(void);
void imx6dl_pm_init(void);

arch/arm/mach-imx/resume-imx6.S (new file, 24 lines)
@@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/cache-l2x0.h>
#include "hardware.h"

/*
 * The following code must assume it is running from physical address
 * where absolute virtual addresses to the data section have to be
 * turned into relative ones.
 */

ENTRY(v7_cpu_resume)
bl v7_invalidate_l1
#ifdef CONFIG_CACHE_L2X0
bl l2c310_early_resume
#endif
b cpu_resume
ENDPROC(v7_cpu_resume)

@@ -333,17 +333,3 @@ resume:

ret lr
ENDPROC(imx6_suspend)

/*
 * The following code must assume it is running from physical address
 * where absolute virtual addresses to the data section have to be
 * turned into relative ones.
 */

ENTRY(v7_cpu_resume)
bl v7_invalidate_l1
#ifdef CONFIG_CACHE_L2X0
bl l2c310_early_resume
#endif
b cpu_resume
ENDPROC(v7_cpu_resume)
@@ -382,6 +382,14 @@ _pll_m_c_x_done:
pll_locked r1, r0, CLK_RESET_PLLC_BASE
pll_locked r1, r0, CLK_RESET_PLLX_BASE

tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
cmp r1, #TEGRA30
beq 1f
ldr r1, [r0, #CLK_RESET_PLLP_BASE]
bic r1, r1, #(1<<31) @ disable PllP bypass
str r1, [r0, #CLK_RESET_PLLP_BASE]
1:

mov32 r7, TEGRA_TMRUS_BASE
ldr r1, [r7]
add r1, r1, #LOCK_DELAY

@@ -641,7 +649,10 @@ tegra30_switch_cpu_to_clk32k:
str r0, [r4, #PMC_PLLP_WB0_OVERRIDE]

/* disable PLLP, PLLA, PLLC and PLLX */
tegra_get_soc_id TEGRA_APB_MISC_BASE, r1
cmp r1, #TEGRA30
ldr r0, [r5, #CLK_RESET_PLLP_BASE]
orrne r0, r0, #(1 << 31) @ enable PllP bypass on fast cluster
bic r0, r0, #(1 << 30)
str r0, [r5, #CLK_RESET_PLLP_BASE]
ldr r0, [r5, #CLK_RESET_PLLA_BASE]

@@ -18,7 +18,7 @@ include $(srctree)/arch/arm64/boot/dts/Makefile

OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S

targets := Image Image.gz
targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo

DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
ifneq ($(DTB_NAMES),)

@@ -29,13 +29,16 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
void __init apply_alternatives_all(void);
void apply_alternatives(void *start, size_t length);

#define ALTINSTR_ENTRY(feature,cb) \
#define ALTINSTR_ENTRY(feature) \
" .word 661b - .\n" /* label */ \
" .if " __stringify(cb) " == 0\n" \
" .word 663f - .\n" /* new instruction */ \
" .else\n" \
" .hword " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* source len */ \
" .byte 664f-663f\n" /* replacement len */

#define ALTINSTR_ENTRY_CB(feature, cb) \
" .word 661b - .\n" /* label */ \
" .word " __stringify(cb) "- .\n" /* callback */ \
" .endif\n" \
" .hword " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* source len */ \
" .byte 664f-663f\n" /* replacement len */

@@ -56,15 +59,14 @@ void apply_alternatives(void *start, size_t length);
 *
 * Alternatives with callbacks do not generate replacement instructions.
 */
#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb) \
#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
".if "__stringify(cfg_enabled)" == 1\n" \
"661:\n\t" \
oldinstr "\n" \
"662:\n" \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY(feature,cb) \
ALTINSTR_ENTRY(feature) \
".popsection\n" \
" .if " __stringify(cb) " == 0\n" \
".pushsection .altinstr_replacement, \"a\"\n" \
"663:\n\t" \
newinstr "\n" \

@@ -72,17 +74,25 @@ void apply_alternatives(void *start, size_t length);
".popsection\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
".org . - (662b-661b) + (664b-663b)\n" \
".else\n\t" \
".endif\n"

#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \
".if "__stringify(cfg_enabled)" == 1\n" \
"661:\n\t" \
oldinstr "\n" \
"662:\n" \
".pushsection .altinstructions,\"a\"\n" \
ALTINSTR_ENTRY_CB(feature, cb) \
".popsection\n" \
"663:\n\t" \
"664:\n\t" \
".endif\n" \
".endif\n"

#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))

#define ALTERNATIVE_CB(oldinstr, cb) \
__ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
__ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
#else

#include <asm/assembler.h>
@@ -92,6 +92,7 @@ static inline void __disable_dcache_nomsr(void)
#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
do { \
int align = ~(cache_line_length - 1); \
if (start < UINT_MAX - cache_size) \
end = min(start + cache_size, end); \
start &= align; \
} while (0)

@@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
{
list_del(&v->list);
if (v->load_addr)
release_progmem(v);
release_progmem(v->load_addr);
kfree(v);
}

@@ -31,6 +31,9 @@ static int __init loongson3_platform_init(void)
continue;

pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
if (!pdev)
return -ENOMEM;

pdev->name = loongson_sysconf.sensors[i].name;
pdev->id = loongson_sysconf.sensors[i].id;
pdev->dev.platform_data = &loongson_sysconf.sensors[i];

@@ -85,6 +85,7 @@ config PPC
select BINFMT_ELF
select ARCH_HAS_ELF_RANDOMIZE
select OF
select OF_DMA_DEFAULT_COHERENT if !NOT_COHERENT_CACHE
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
select HAVE_FTRACE_MCOUNT_RECORD
@@ -63,6 +63,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy0: ethernet-phy@0 {
reg = <0x0>;

@@ -60,6 +60,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy6: ethernet-phy@0 {
reg = <0x0>;

@@ -63,6 +63,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy1: ethernet-phy@0 {
reg = <0x0>;

@@ -60,6 +60,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy7: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy0: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy1: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe5000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy2: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe7000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy3: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe9000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy4: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xeb000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy5: ethernet-phy@0 {
reg = <0x0>;

@@ -60,6 +60,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy14: ethernet-phy@0 {
reg = <0x0>;

@@ -60,6 +60,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xf3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy15: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe1000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy8: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe3000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy9: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe5000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy10: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe7000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy11: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xe9000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy12: ethernet-phy@0 {
reg = <0x0>;

@@ -59,6 +59,7 @@ fman@500000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xeb000 0x1000>;
fsl,erratum-a011043; /* must ignore read errors */

pcsphy13: ethernet-phy@0 {
reg = <0x0>;
@ -2199,11 +2199,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
|
|||
* oprofile_cpu_type already has a value, then we are
|
||||
* possibly overriding a real PVR with a logical one,
|
||||
* and, in that case, keep the current value for
|
||||
* oprofile_cpu_type.
|
||||
* oprofile_cpu_type. Futhermore, let's ensure that the
|
||||
* fix for the PMAO bug is enabled on compatibility mode.
|
||||
*/
|
||||
if (old.oprofile_cpu_type != NULL) {
|
||||
t->oprofile_cpu_type = old.oprofile_cpu_type;
|
||||
t->oprofile_type = old.oprofile_type;
|
||||
t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -520,12 +520,6 @@ static void *eeh_rmv_device(void *data, void *userdata)
|
|||
|
||||
pci_iov_remove_virtfn(edev->physfn, pdn->vf_index, 0);
|
||||
edev->pdev = NULL;
|
||||
|
||||
/*
|
||||
* We have to set the VF PE number to invalid one, which is
|
||||
* required to plug the VF successfully.
|
||||
*/
|
||||
pdn->pe_number = IODA_INVALID_PE;
|
||||
#endif
|
||||
if (rmv_data)
|
||||
list_add(&edev->rmv_list, &rmv_data->edev_list);
|
||||
|
|
|
@ -271,9 +271,22 @@ void remove_dev_pci_data(struct pci_dev *pdev)
|
|||
continue;
|
||||
|
||||
#ifdef CONFIG_EEH
|
||||
/* Release EEH device for the VF */
|
||||
/*
|
||||
* Release EEH state for this VF. The PCI core
|
||||
* has already torn down the pci_dev for this VF, but
|
||||
* we're responsible to removing the eeh_dev since it
|
||||
* has the same lifetime as the pci_dn that spawned it.
|
||||
*/
|
||||
edev = pdn_to_eeh_dev(pdn);
|
||||
if (edev) {
|
||||
/*
|
||||
* We allocate pci_dn's for the totalvfs count,
|
||||
* but only only the vfs that were activated
|
||||
* have a configured PE.
|
||||
*/
|
||||
if (edev->pe)
|
||||
eeh_rmv_from_parent_pe(edev);
|
||||
|
||||
pdn->edev = NULL;
|
||||
kfree(edev);
|
||||
}
|
||||
|
|
|
@ -1766,7 +1766,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
|
|||
mutex_unlock(&kvm->lock);
|
||||
|
||||
if (!vcore)
|
||||
goto free_vcpu;
|
||||
goto uninit_vcpu;
|
||||
|
||||
spin_lock(&vcore->lock);
|
||||
++vcore->num_threads;
|
||||
|
@ -1782,6 +1782,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
|
|||
|
||||
return vcpu;
|
||||
|
||||
uninit_vcpu:
|
||||
kvm_vcpu_uninit(vcpu);
|
||||
free_vcpu:
|
||||
kmem_cache_free(kvm_vcpu_cache, vcpu);
|
||||
out:
|
||||
|
|
|
@ -1482,10 +1482,12 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
|
|||
|
||||
err = kvmppc_mmu_init(vcpu);
|
||||
if (err < 0)
|
||||
goto uninit_vcpu;
|
||||
goto free_shared_page;
|
||||
|
||||
return vcpu;
|
||||
|
||||
free_shared_page:
|
||||
free_page((unsigned long)vcpu->arch.shared);
|
||||
uninit_vcpu:
|
||||
kvm_vcpu_uninit(vcpu);
|
||||
free_shadow_vcpu:
|
||||
|
|
|
@ -1524,6 +1524,10 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
|
|||
|
||||
/* Reserve PE for each VF */
|
||||
for (vf_index = 0; vf_index < num_vfs; vf_index++) {
|
||||
int vf_devfn = pci_iov_virtfn_devfn(pdev, vf_index);
|
||||
int vf_bus = pci_iov_virtfn_bus(pdev, vf_index);
|
||||
struct pci_dn *vf_pdn;
|
||||
|
||||
if (pdn->m64_single_mode)
|
||||
pe_num = pdn->pe_num_map[vf_index];
|
||||
else
|
||||
|
@ -1536,13 +1540,11 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
|
|||
pe->pbus = NULL;
|
||||
pe->parent_dev = pdev;
|
||||
pe->mve_number = -1;
|
||||
pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
|
||||
pci_iov_virtfn_devfn(pdev, vf_index);
|
||||
pe->rid = (vf_bus << 8) | vf_devfn;
|
||||
|
||||
pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
|
||||
hose->global_number, pdev->bus->number,
|
||||
PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
|
||||
PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
|
||||
PCI_SLOT(vf_devfn), PCI_FUNC(vf_devfn), pe_num);
|
||||
|
||||
if (pnv_ioda_configure_pe(phb, pe)) {
|
||||
/* XXX What do we do here ? */
|
||||
|
@ -1556,6 +1558,15 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
|
|||
list_add_tail(&pe->list, &phb->ioda.pe_list);
|
||||
mutex_unlock(&phb->ioda.pe_list_mutex);
|
||||
|
||||
/* associate this pe to it's pdn */
|
||||
list_for_each_entry(vf_pdn, &pdn->parent->child_list, list) {
|
||||
if (vf_pdn->busno == vf_bus &&
|
||||
vf_pdn->devfn == vf_devfn) {
|
||||
vf_pdn->pe_number = pe_num;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
pnv_pci_ioda2_setup_dma_pe(phb, pe);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -856,16 +856,12 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
|
|||
struct pnv_phb *phb = hose->private_data;
|
||||
#ifdef CONFIG_PCI_IOV
|
||||
struct pnv_ioda_pe *pe;
|
||||
struct pci_dn *pdn;
|
||||
|
||||
/* Fix the VF pdn PE number */
|
||||
if (pdev->is_virtfn) {
|
||||
pdn = pci_get_pdn(pdev);
|
||||
WARN_ON(pdn->pe_number != IODA_INVALID_PE);
|
||||
list_for_each_entry(pe, &phb->ioda.pe_list, list) {
|
||||
if (pe->rid == ((pdev->bus->number << 8) |
|
||||
(pdev->devfn & 0xff))) {
|
||||
pdn->pe_number = pe->pe_number;
|
||||
pe->pdev = pdev;
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -398,8 +398,10 @@ static bool lmb_is_removable(struct of_drconf_cell *lmb)
|
|||
|
||||
for (i = 0; i < scns_per_block; i++) {
|
||||
pfn = PFN_DOWN(phys_addr);
|
||||
if (!pfn_present(pfn))
|
||||
if (!pfn_present(pfn)) {
|
||||
phys_addr += MIN_MEMORY_BLOCK_SIZE;
|
||||
continue;
|
||||
}
|
||||
|
||||
rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
|
||||
phys_addr += MIN_MEMORY_BLOCK_SIZE;
|
||||
|
|
|
@ -167,10 +167,10 @@ static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
|
|||
return be64_to_cpu(*tcep);
|
||||
}
|
||||
|
||||
static void tce_free_pSeriesLP(struct iommu_table*, long, long);
|
||||
static void tce_free_pSeriesLP(unsigned long liobn, long, long);
|
||||
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);
|
||||
|
||||
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
|
||||
static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
|
||||
long npages, unsigned long uaddr,
|
||||
enum dma_data_direction direction,
|
||||
unsigned long attrs)
|
||||
|
@ -181,25 +181,25 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
|
|||
int ret = 0;
|
||||
long tcenum_start = tcenum, npages_start = npages;
|
||||
|
||||
rpn = __pa(uaddr) >> TCE_SHIFT;
|
||||
rpn = __pa(uaddr) >> tceshift;
|
||||
proto_tce = TCE_PCI_READ;
|
||||
if (direction != DMA_TO_DEVICE)
|
||||
proto_tce |= TCE_PCI_WRITE;
|
||||
|
||||
while (npages--) {
|
||||
tce = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
|
||||
rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, tce);
|
||||
tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
|
||||
rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);
|
||||
|
||||
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
|
||||
ret = (int)rc;
|
||||
tce_free_pSeriesLP(tbl, tcenum_start,
|
||||
tce_free_pSeriesLP(liobn, tcenum_start,
|
||||
(npages_start - (npages + 1)));
|
||||
break;
|
||||
}
|
||||
|
||||
if (rc && printk_ratelimit()) {
|
||||
printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
|
||||
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
|
||||
printk("\tindex = 0x%llx\n", (u64)liobn);
|
||||
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
|
||||
printk("\ttce val = 0x%llx\n", tce );
|
||||
dump_stack();
|
||||
|
@ -228,7 +228,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
|
|||
unsigned long flags;
|
||||
|
||||
if ((npages == 1) || !firmware_has_feature(FW_FEATURE_MULTITCE)) {
|
||||
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
|
||||
return tce_build_pSeriesLP(tbl->it_index, tcenum,
|
||||
tbl->it_page_shift, npages, uaddr,
|
||||
direction, attrs);
|
||||
}
|
||||
|
||||
|
@ -244,8 +245,9 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
|
|||
/* If allocation fails, fall back to the loop implementation */
|
||||
if (!tcep) {
|
||||
local_irq_restore(flags);
|
||||
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
|
||||
direction, attrs);
|
||||
return tce_build_pSeriesLP(tbl->it_index, tcenum,
|
||||
tbl->it_page_shift,
|
||||
npages, uaddr, direction, attrs);
|
||||
}
|
||||
__this_cpu_write(tce_page, tcep);
|
||||
}
|
||||
|
@ -296,16 +298,16 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
|
||||
static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
|
||||
{
|
||||
u64 rc;
|
||||
|
||||
while (npages--) {
|
||||
rc = plpar_tce_put((u64)tbl->it_index, (u64)tcenum << 12, 0);
|
||||
rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);
|
||||
|
||||
if (rc && printk_ratelimit()) {
|
||||
printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
|
||||
printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
|
||||
printk("\tindex = 0x%llx\n", (u64)liobn);
|
||||
printk("\ttcenum = 0x%llx\n", (u64)tcenum);
|
||||
dump_stack();
|
||||
}
|
||||
|
@ -320,7 +322,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
|
|||
u64 rc;
|
||||
|
||||
if (!firmware_has_feature(FW_FEATURE_MULTITCE))
|
||||
return tce_free_pSeriesLP(tbl, tcenum, npages);
|
||||
return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);
|
||||
|
||||
rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);
|
||||
|
||||
|
@ -435,6 +437,19 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
|
|||
u64 rc = 0;
|
||||
long l, limit;
|
||||
|
||||
if (!firmware_has_feature(FW_FEATURE_MULTITCE)) {
|
||||
unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
|
||||
unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
|
||||
be64_to_cpu(maprange->dma_base);
|
||||
unsigned long tcenum = dmastart >> tceshift;
|
||||
unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
|
||||
void *uaddr = __va(start_pfn << PAGE_SHIFT);
|
||||
|
||||
return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
|
||||
tcenum, tceshift, npages, (unsigned long) uaddr,
|
||||
DMA_BIDIRECTIONAL, 0);
|
||||
}
|
||||
|
||||
local_irq_disable(); /* to protect tcep and the page behind it */
|
||||
tcep = __this_cpu_read(tce_page);
|
||||
|
||||
|
|
|
@ -35,7 +35,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
|
|||
|
||||
static inline void storage_key_init_range(unsigned long start, unsigned long end)
|
||||
{
|
||||
if (PAGE_DEFAULT_KEY)
|
||||
if (PAGE_DEFAULT_KEY != 0)
|
||||
__storage_key_init_range(start, end);
|
||||
}
|
||||
|
||||
|
|
|
@ -146,7 +146,7 @@ static inline void get_tod_clock_ext(char *clk)
|
|||
|
||||
static inline unsigned long long get_tod_clock(void)
|
||||
{
|
||||
unsigned char clk[STORE_CLOCK_EXT_SIZE];
|
||||
char clk[STORE_CLOCK_EXT_SIZE];
|
||||
|
||||
get_tod_clock_ext(clk);
|
||||
return *((unsigned long long *)&clk[1]);
|
||||
|
|
|
@ -24,6 +24,12 @@ ENTRY(ftrace_stub)
|
|||
#define STACK_PTREGS (STACK_FRAME_OVERHEAD)
|
||||
#define STACK_PTREGS_GPRS (STACK_PTREGS + __PT_GPRS)
|
||||
#define STACK_PTREGS_PSW (STACK_PTREGS + __PT_PSW)
|
||||
#ifdef __PACK_STACK
|
||||
/* allocate just enough for r14, r15 and backchain */
|
||||
#define TRACED_FUNC_FRAME_SIZE 24
|
||||
#else
|
||||
#define TRACED_FUNC_FRAME_SIZE STACK_FRAME_OVERHEAD
|
||||
#endif
|
||||
|
||||
ENTRY(_mcount)
|
||||
BR_EX %r14
|
||||
|
@ -37,9 +43,16 @@ ENTRY(ftrace_caller)
|
|||
#ifndef CC_USING_HOTPATCH
|
||||
aghi %r0,MCOUNT_RETURN_FIXUP
|
||||
#endif
|
||||
aghi %r15,-STACK_FRAME_SIZE
|
||||
# allocate stack frame for ftrace_caller to contain traced function
|
||||
aghi %r15,-TRACED_FUNC_FRAME_SIZE
|
||||
stg %r1,__SF_BACKCHAIN(%r15)
|
||||
stg %r0,(__SF_GPRS+8*8)(%r15)
|
||||
stg %r15,(__SF_GPRS+9*8)(%r15)
|
||||
# allocate pt_regs and stack frame for ftrace_trace_function
|
||||
aghi %r15,-STACK_FRAME_SIZE
|
||||
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
|
||||
aghi %r1,-TRACED_FUNC_FRAME_SIZE
|
||||
stg %r1,__SF_BACKCHAIN(%r15)
|
||||
stg %r0,(STACK_PTREGS_PSW+8)(%r15)
|
||||
stmg %r2,%r14,(STACK_PTREGS_GPRS+2*8)(%r15)
|
||||
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
|
||||
|
|
|
@ -79,8 +79,15 @@ enum {
|
|||
GPIO_FN_WDTOVF,
|
||||
|
||||
/* CAN */
|
||||
GPIO_FN_CTX1, GPIO_FN_CRX1, GPIO_FN_CTX0, GPIO_FN_CTX0_CTX1,
|
||||
GPIO_FN_CRX0, GPIO_FN_CRX0_CRX1, GPIO_FN_CRX0_CRX1_CRX2,
|
||||
GPIO_FN_CTX2, GPIO_FN_CRX2,
|
||||
GPIO_FN_CTX1, GPIO_FN_CRX1,
|
||||
GPIO_FN_CTX0, GPIO_FN_CRX0,
|
||||
GPIO_FN_CTX0_CTX1, GPIO_FN_CRX0_CRX1,
|
||||
GPIO_FN_CTX0_CTX1_CTX2, GPIO_FN_CRX0_CRX1_CRX2,
|
||||
GPIO_FN_CTX2_PJ21, GPIO_FN_CRX2_PJ20,
|
||||
GPIO_FN_CTX1_PJ23, GPIO_FN_CRX1_PJ22,
|
||||
GPIO_FN_CTX0_CTX1_PJ23, GPIO_FN_CRX0_CRX1_PJ22,
|
||||
GPIO_FN_CTX0_CTX1_CTX2_PJ21, GPIO_FN_CRX0_CRX1_CRX2_PJ20,
|
||||
|
||||
/* DMAC */
|
||||
GPIO_FN_TEND0, GPIO_FN_DACK0, GPIO_FN_DREQ0,
|
||||
|
|
|
@ -15,10 +15,10 @@
|
|||
struct ipc64_perm
|
||||
{
|
||||
__kernel_key_t key;
|
||||
__kernel_uid_t uid;
|
||||
__kernel_gid_t gid;
|
||||
__kernel_uid_t cuid;
|
||||
__kernel_gid_t cgid;
|
||||
__kernel_uid32_t uid;
|
||||
__kernel_gid32_t gid;
|
||||
__kernel_uid32_t cuid;
|
||||
__kernel_gid32_t cgid;
|
||||
#ifndef __arch64__
|
||||
unsigned short __pad0;
|
||||
#endif
|
||||
|
|
|
@ -151,12 +151,14 @@ SECTIONS
|
|||
}
|
||||
PERCPU_SECTION(SMP_CACHE_BYTES)
|
||||
|
||||
#ifdef CONFIG_JUMP_LABEL
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
.exit.text : {
|
||||
EXIT_TEXT
|
||||
}
|
||||
#endif
|
||||
|
||||
.exit.data : {
|
||||
EXIT_DATA
|
||||
}
|
||||
|
||||
. = ALIGN(PAGE_SIZE);
|
||||
__init_end = .;
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/smp.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/elf.h>
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/vdso.h>
|
||||
|
|
|
@ -239,6 +239,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
|
|||
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
|
||||
|
|
|
@ -1326,6 +1326,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
|
|||
old = ((s64)(prev_raw_count << shift) >> shift);
|
||||
local64_add(new - old + count * period, &event->count);
|
||||
|
||||
local64_set(&hwc->period_left, -new);
|
||||
|
||||
perf_event_update_userpage(event);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -305,6 +305,7 @@
|
|||
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
|
||||
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
|
||||
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
|
||||
#define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */
|
||||
|
||||
/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
|
||||
#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
|
||||
|
|
|
@ -89,8 +89,13 @@ static inline unsigned int __getcpu(void)
|
|||
* works on all CPUs. This is volatile so that it orders
|
||||
* correctly wrt barrier() and to keep gcc from cleverly
|
||||
* hoisting it out of the calling function.
|
||||
*
|
||||
* If RDPID is available, use it.
|
||||
*/
|
||||
asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
|
||||
alternative_io ("lsl %[p],%[seg]",
|
||||
".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
|
||||
X86_FEATURE_RDPID,
|
||||
[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));
|
||||
|
||||
return p;
|
||||
}
|
||||
|
|
|
@ -388,7 +388,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
|
|||
* cpuid bit to be set. We need to ensure that we
|
||||
* update that bit in this CPU's "cpu_info".
|
||||
*/
|
||||
get_cpu_cap(c);
|
||||
set_cpu_cap(c, X86_FEATURE_OSPKE);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
||||
|
|
|
@ -846,9 +846,12 @@ static const struct sysfs_ops threshold_ops = {
|
|||
.store = store,
|
||||
};
|
||||
|
||||
static void threshold_block_release(struct kobject *kobj);
|
||||
|
||||
static struct kobj_type threshold_ktype = {
|
||||
.sysfs_ops = &threshold_ops,
|
||||
.default_attrs = default_attrs,
|
||||
.release = threshold_block_release,
|
||||
};
|
||||
|
||||
static const char *get_name(unsigned int bank, struct threshold_block *b)
|
||||
|
@ -879,8 +882,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
|
|||
return buf_mcatype;
|
||||
}
|
||||
|
||||
static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
|
||||
unsigned int block, u32 address)
|
||||
static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
|
||||
unsigned int bank, unsigned int block,
|
||||
u32 address)
|
||||
{
|
||||
struct threshold_block *b = NULL;
|
||||
u32 low, high;
|
||||
|
@ -924,16 +928,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
|
|||
|
||||
INIT_LIST_HEAD(&b->miscj);
|
||||
|
||||
if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
|
||||
list_add(&b->miscj,
|
||||
&per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
|
||||
} else {
|
||||
per_cpu(threshold_banks, cpu)[bank]->blocks = b;
|
||||
}
|
||||
if (tb->blocks)
|
||||
list_add(&b->miscj, &tb->blocks->miscj);
|
||||
else
|
||||
tb->blocks = b;
|
||||
|
||||
err = kobject_init_and_add(&b->kobj, &threshold_ktype,
|
||||
per_cpu(threshold_banks, cpu)[bank]->kobj,
|
||||
get_name(bank, b));
|
||||
err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
|
||||
if (err)
|
||||
goto out_free;
|
||||
recurse:
|
||||
|
@ -941,7 +941,7 @@ recurse:
|
|||
if (!address)
|
||||
return 0;
|
||||
|
||||
err = allocate_threshold_blocks(cpu, bank, block, address);
|
||||
err = allocate_threshold_blocks(cpu, tb, bank, block, address);
|
||||
if (err)
|
||||
goto out_free;
|
||||
|
||||
|
@ -1026,8 +1026,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
|
|||
goto out_free;
|
||||
}
|
||||
|
||||
per_cpu(threshold_banks, cpu)[bank] = b;
|
||||
|
||||
if (is_shared_bank(bank)) {
|
||||
atomic_set(&b->cpus, 1);
|
||||
|
||||
|
@ -1038,9 +1036,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
|
|||
}
|
||||
}
|
||||
|
||||
err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
|
||||
if (!err)
|
||||
goto out;
|
||||
err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
|
||||
if (err)
|
||||
goto out_free;
|
||||
|
||||
per_cpu(threshold_banks, cpu)[bank] = b;
|
||||
|
||||
return 0;
|
||||
|
||||
out_free:
|
||||
kfree(b);
|
||||
|
@ -1074,8 +1076,12 @@ static int threshold_create_device(unsigned int cpu)
|
|||
return err;
|
||||
}
|
||||
|
||||
static void deallocate_threshold_block(unsigned int cpu,
|
||||
unsigned int bank)
|
||||
static void threshold_block_release(struct kobject *kobj)
|
||||
{
|
||||
kfree(to_block(kobj));
|
||||
}
|
||||
|
||||
static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
|
||||
{
|
||||
struct threshold_block *pos = NULL;
|
||||
struct threshold_block *tmp = NULL;
|
||||
|
@ -1085,13 +1091,11 @@ static void deallocate_threshold_block(unsigned int cpu,
|
|||
return;
|
||||
|
||||
list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
|
||||
kobject_put(&pos->kobj);
|
||||
list_del(&pos->miscj);
|
||||
kfree(pos);
|
||||
kobject_put(&pos->kobj);
|
||||
}
|
||||
|
||||
kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
|
||||
per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
|
||||
kobject_put(&head->blocks->kobj);
|
||||
}
|
||||
|
||||
static void __threshold_remove_blocks(struct threshold_bank *b)
|
||||
|
|
|
@ -115,11 +115,12 @@ void __init tsx_init(void)
|
|||
tsx_disable();
|
||||
|
||||
/*
|
||||
* tsx_disable() will change the state of the
|
||||
* RTM CPUID bit. Clear it here since it is now
|
||||
* expected to be not set.
|
||||
* tsx_disable() will change the state of the RTM and HLE CPUID
|
||||
* bits. Clear them here since they are now expected to be not
|
||||
* set.
|
||||
*/
|
||||
setup_clear_cpu_cap(X86_FEATURE_RTM);
|
||||
setup_clear_cpu_cap(X86_FEATURE_HLE);
|
||||
} else if (tsx_ctrl_state == TSX_CTRL_ENABLE) {
|
||||
|
||||
/*
|
||||
|
@ -131,10 +132,10 @@ void __init tsx_init(void)
|
|||
tsx_enable();
|
||||
|
||||
/*
|
||||
* tsx_enable() will change the state of the
|
||||
* RTM CPUID bit. Force it here since it is now
|
||||
* expected to be set.
|
||||
* tsx_enable() will change the state of the RTM and HLE CPUID
|
||||
* bits. Force them here since they are now expected to be set.
|
||||
*/
|
||||
setup_force_cpu_cap(X86_FEATURE_RTM);
|
||||
setup_force_cpu_cap(X86_FEATURE_HLE);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -94,11 +94,11 @@ __init int create_simplefb(const struct screen_info *si,
|
|||
if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
|
||||
size <<= 16;
|
||||
length = mode->height * mode->stride;
|
||||
length = PAGE_ALIGN(length);
|
||||
if (length > size) {
|
||||
printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
length = PAGE_ALIGN(length);
|
||||
|
||||
/* setup IORESOURCE_MEM as framebuffer memory */
|
||||
memset(&res, 0, sizeof(res));
|
||||
|
|
|
@ -279,13 +279,18 @@ static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
|
|||
{
|
||||
switch (func) {
|
||||
case 0:
|
||||
entry->eax = 1; /* only one leaf currently */
|
||||
entry->eax = 7;
|
||||
++*nent;
|
||||
break;
|
||||
case 1:
|
||||
entry->ecx = F(MOVBE);
|
||||
++*nent;
|
||||
break;
|
||||
case 7:
|
||||
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
|
||||
if (index == 0)
|
||||
entry->ecx = F(RDPID);
|
||||
++*nent;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/nospec.h>
|
||||
#include "kvm_cache_regs.h"
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <linux/stringify.h>
|
||||
|
@ -3530,6 +3531,16 @@ static int em_cwd(struct x86_emulate_ctxt *ctxt)
|
|||
return X86EMUL_CONTINUE;
|
||||
}
|
||||
|
||||
static int em_rdpid(struct x86_emulate_ctxt *ctxt)
|
||||
{
|
||||
u64 tsc_aux = 0;
|
||||
|
||||
if (ctxt->ops->get_msr(ctxt, MSR_TSC_AUX, &tsc_aux))
|
||||
return emulate_gp(ctxt, 0);
|
||||
ctxt->dst.val = tsc_aux;
|
||||
return X86EMUL_CONTINUE;
|
||||
}
|
||||
|
||||
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
|
||||
{
|
||||
u64 tsc = 0;
|
||||
|
@ -4390,10 +4401,20 @@ static const struct opcode group8[] = {
|
|||
F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
|
||||
};
|
||||
|
||||
/*
|
||||
* The "memory" destination is actually always a register, since we come
|
||||
* from the register case of group9.
|
||||
*/
|
||||
static const struct gprefix pfx_0f_c7_7 = {
|
||||
N, N, N, II(DstMem | ModRM | Op3264 | EmulateOnUD, em_rdpid, rdtscp),
|
||||
};
|
||||
|
||||
|
||||
static const struct group_dual group9 = { {
|
||||
N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
|
||||
}, {
|
||||
N, N, N, N, N, N, N, N,
|
||||
N, N, N, N, N, N, N,
|
||||
GP(0, &pfx_0f_c7_7),
|
||||
} };
|
||||
|
||||
static const struct opcode group11[] = {
|
||||
|
@ -5053,16 +5074,28 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
|
|||
ctxt->ad_bytes = def_ad_bytes ^ 6;
|
||||
break;
|
||||
case 0x26: /* ES override */
|
||||
has_seg_override = true;
|
||||
ctxt->seg_override = VCPU_SREG_ES;
|
||||
break;
|
||||
case 0x2e: /* CS override */
|
||||
has_seg_override = true;
|
||||
ctxt->seg_override = VCPU_SREG_CS;
|
||||
break;
|
||||
case 0x36: /* SS override */
|
||||
has_seg_override = true;
|
||||
ctxt->seg_override = VCPU_SREG_SS;
|
||||
break;
|
||||
case 0x3e: /* DS override */
|
||||
has_seg_override = true;
|
||||
ctxt->seg_override = (ctxt->b >> 3) & 3;
|
||||
ctxt->seg_override = VCPU_SREG_DS;
|
||||
break;
|
||||
case 0x64: /* FS override */
|
||||
has_seg_override = true;
|
||||
ctxt->seg_override = VCPU_SREG_FS;
|
||||
break;
|
||||
case 0x65: /* GS override */
|
||||
has_seg_override = true;
|
||||
ctxt->seg_override = ctxt->b & 7;
|
||||
ctxt->seg_override = VCPU_SREG_GS;
|
||||
break;
|
||||
case 0x40 ... 0x4f: /* REX */
|
||||
if (mode != X86EMUL_MODE_PROT64)
|
||||
|
@ -5146,10 +5179,15 @@ done_prefixes:
|
|||
}
|
||||
break;
|
||||
case Escape:
|
||||
if (ctxt->modrm > 0xbf)
|
||||
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
|
||||
else
|
||||
if (ctxt->modrm > 0xbf) {
|
||||
size_t size = ARRAY_SIZE(opcode.u.esc->high);
|
||||
u32 index = array_index_nospec(
|
||||
ctxt->modrm - 0xc0, size);
|
||||
|
||||
opcode = opcode.u.esc->high[index];
|
||||
} else {
|
||||
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
|
||||
}
|
||||
break;
|
||||
case InstrDual:
|
||||
if ((ctxt->modrm >> 6) == 3)
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/nospec.h>
|
||||
#include <asm/apicdef.h>
|
||||
#include <trace/events/kvm.h>
|
||||
|
||||
|
@ -719,11 +720,12 @@ static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
|
|||
u32 index, u64 *pdata)
|
||||
{
|
||||
struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
|
||||
size_t size = ARRAY_SIZE(hv->hv_crash_param);
|
||||
|
||||
if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
|
||||
if (WARN_ON_ONCE(index >= size))
|
||||
return -EINVAL;
|
||||
|
||||
*pdata = hv->hv_crash_param[index];
|
||||
*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -762,11 +764,12 @@ static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
|
|||
u32 index, u64 data)
|
||||
{
|
||||
struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
|
||||
size_t size = ARRAY_SIZE(hv->hv_crash_param);
|
||||
|
||||
if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
|
||||
if (WARN_ON_ONCE(index >= size))
|
||||
return -EINVAL;
|
||||
|
||||
hv->hv_crash_param[index] = data;
|
||||
hv->hv_crash_param[array_index_nospec(index, size)] = data;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -36,6 +36,7 @@
|
|||
#include <linux/io.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/nospec.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/current.h>
|
||||
|
@ -73,13 +74,14 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
|
|||
default:
|
||||
{
|
||||
u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
|
||||
u64 redir_content;
|
||||
u64 redir_content = ~0ULL;
|
||||
|
||||
if (redir_index < IOAPIC_NUM_PINS)
|
||||
redir_content =
|
||||
ioapic->redirtbl[redir_index].bits;
|
||||
else
|
||||
redir_content = ~0ULL;
|
||||
if (redir_index < IOAPIC_NUM_PINS) {
|
||||
u32 index = array_index_nospec(
|
||||
redir_index, IOAPIC_NUM_PINS);
|
||||
|
||||
redir_content = ioapic->redirtbl[index].bits;
|
||||
}
|
||||
|
||||
result = (ioapic->ioregsel & 0x1) ?
|
||||
(redir_content >> 32) & 0xffffffff :
|
||||
|
@ -299,6 +301,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
|
|||
ioapic_debug("change redir index %x val %x\n", index, val);
|
||||
if (index >= IOAPIC_NUM_PINS)
|
||||
return;
|
||||
index = array_index_nospec(index, IOAPIC_NUM_PINS);
|
||||
e = &ioapic->redirtbl[index];
|
||||
mask_before = e->fields.mask;
|
||||
/* Preserve read-only fields */
|
||||
|
|
|
@ -436,7 +436,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
|
|||
|
||||
kvm_set_msi_irq(vcpu->kvm, entry, &irq);
|
||||
|
||||
if (irq.level && kvm_apic_match_dest(vcpu, NULL, 0,
|
||||
if (irq.trig_mode && kvm_apic_match_dest(vcpu, NULL, 0,
|
||||
irq.dest_id, irq.dest_mode))
|
||||
__set_bit(irq.vector, ioapic_handled_vectors);
|
||||
}
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include <linux/export.h>
|
||||
#include <linux/math64.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/nospec.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/page.h>
|
||||
|
@ -531,9 +532,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
|
|||
static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u8 val;
|
||||
if (pv_eoi_get_user(vcpu, &val) < 0)
|
||||
if (pv_eoi_get_user(vcpu, &val) < 0) {
|
||||
apic_debug("Can't read EOI MSR value: 0x%llx\n",
|
||||
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
|
||||
return false;
|
||||
}
|
||||
return val & 0x1;
|
||||
}
|
||||
|
||||
|
@ -1587,15 +1590,20 @@ int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
|
|||
case APIC_LVTTHMR:
|
||||
case APIC_LVTPC:
|
||||
case APIC_LVT1:
|
||||
case APIC_LVTERR:
|
||||
case APIC_LVTERR: {
|
||||
/* TODO: Check vector */
|
||||
size_t size;
|
||||
u32 index;
|
||||
|
||||
if (!kvm_apic_sw_enabled(apic))
|
||||
val |= APIC_LVT_MASKED;
|
||||
|
||||
val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
|
||||
size = ARRAY_SIZE(apic_lvt_mask);
|
||||
index = array_index_nospec(
|
||||
(reg - APIC_LVTT) >> 4, size);
|
||||
val &= apic_lvt_mask[index];
|
||||
kvm_lapic_set_reg(apic, reg, val);
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
case APIC_LVTT:
|
||||
if (!kvm_apic_sw_enabled(apic))
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
*/
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/nospec.h>
|
||||
#include <asm/mtrr.h>
|
||||
|
||||
#include "cpuid.h"
|
||||
|
@ -202,11 +203,15 @@ static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
|
|||
break;
|
||||
case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
|
||||
*seg = 1;
|
||||
*unit = msr - MSR_MTRRfix16K_80000;
|
||||
*unit = array_index_nospec(
|
||||
msr - MSR_MTRRfix16K_80000,
|
||||
MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
|
||||
break;
|
||||
case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
|
||||
*seg = 2;
|
||||
*unit = msr - MSR_MTRRfix4K_C0000;
|
||||
*unit = array_index_nospec(
|
||||
msr - MSR_MTRRfix4K_C0000,
|
||||
MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
#ifndef __KVM_X86_PMU_H
|
||||
#define __KVM_X86_PMU_H
|
||||
|
||||
#include <linux/nospec.h>
|
||||
|
||||
#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
|
||||
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))
|
||||
#define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
|
||||
|
@ -80,8 +82,12 @@ static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
|
|||
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
|
||||
u32 base)
|
||||
{
|
||||
if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
|
||||
return &pmu->gp_counters[msr - base];
|
||||
if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
|
||||
u32 index = array_index_nospec(msr - base,
|
||||
pmu->nr_arch_gp_counters);
|
||||
|
||||
return &pmu->gp_counters[index];
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
@ -91,8 +97,12 @@ static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
|
|||
{
|
||||
int base = MSR_CORE_PERF_FIXED_CTR0;
|
||||
|
||||
if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
|
||||
return &pmu->fixed_counters[msr - base];
|
||||
if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
|
||||
u32 index = array_index_nospec(msr - base,
|
||||
pmu->nr_arch_fixed_counters);
|
||||
|
||||
return &pmu->fixed_counters[index];
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
|
|
@ -87,10 +87,14 @@ static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
|
|||
|
||||
static unsigned intel_find_fixed_event(int idx)
|
||||
{
|
||||
if (idx >= ARRAY_SIZE(fixed_pmc_events))
|
||||
u32 event;
|
||||
size_t size = ARRAY_SIZE(fixed_pmc_events);
|
||||
|
||||
if (idx >= size)
|
||||
return PERF_COUNT_HW_MAX;
|
||||
|
||||
return intel_arch_events[fixed_pmc_events[idx]].event_type;
|
||||
event = fixed_pmc_events[array_index_nospec(idx, size)];
|
||||
return intel_arch_events[event].event_type;
|
||||
}
|
||||
|
||||
/* check if a PMC is enabled by comparing it with globl_ctrl bits. */
|
||||
|
@ -131,15 +135,19 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
|
|||
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
|
||||
bool fixed = idx & (1u << 30);
|
||||
struct kvm_pmc *counters;
|
||||
unsigned int num_counters;
|
||||
|
||||
idx &= ~(3u << 30);
|
||||
if (!fixed && idx >= pmu->nr_arch_gp_counters)
|
||||
if (fixed) {
|
||||
counters = pmu->fixed_counters;
|
||||
num_counters = pmu->nr_arch_fixed_counters;
|
||||
} else {
|
||||
counters = pmu->gp_counters;
|
||||
num_counters = pmu->nr_arch_gp_counters;
|
||||
}
|
||||
if (idx >= num_counters)
|
||||
return NULL;
|
||||
if (fixed && idx >= pmu->nr_arch_fixed_counters)
|
||||
return NULL;
|
||||
counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
|
||||
|
||||
return &counters[idx];
|
||||
return &counters[array_index_nospec(idx, num_counters)];
|
||||
}
|
||||
|
||||
static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
|
||||
|
|
|
@ -4641,6 +4641,26 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
(ss.selector & SEGMENT_RPL_MASK));
}

static bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu,
unsigned int port, int size);
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
unsigned long exit_qualification;
unsigned short port;
int size;

if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;

return nested_vmx_check_io_bitmaps(vcpu, port, size);
}

/*
* Check if guest state is valid. Returns true if valid, false if
* not.

@ -7653,8 +7673,10 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
/* _system ok, as nested_vmx_check_permission verified cpl=0 */
if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
(is_long_mode(vcpu) ? 8 : 4),
&e))
&e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
}

nested_vmx_succeed(vcpu);

@ -8024,23 +8046,17 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
static const int kvm_vmx_max_exit_handlers =
ARRAY_SIZE(kvm_vmx_exit_handlers);

static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
/*
* Return true if an IO instruction with the specified port and size should cause
* a VM-exit into L1.
*/
bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
int size)
{
unsigned long exit_qualification;
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
gpa_t bitmap, last_bitmap;
unsigned int port;
int size;
u8 b;

if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);

exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;

last_bitmap = (gpa_t)-1;
b = -1;

@ -11332,11 +11348,71 @@ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
}

static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
unsigned short port;
bool intercept;
int size;

if (info->intercept == x86_intercept_in ||
info->intercept == x86_intercept_ins) {
port = info->src_val;
size = info->dst_bytes;
} else {
port = info->dst_val;
size = info->src_bytes;
}

/*
* If the 'use IO bitmaps' VM-execution control is 0, IO instruction
* VM-exits depend on the 'unconditional IO exiting' VM-execution
* control.
*
* Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
*/
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
intercept = nested_cpu_has(vmcs12,
CPU_BASED_UNCOND_IO_EXITING);
else
intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);

return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
}

static int vmx_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
return X86EMUL_CONTINUE;
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;

switch (info->intercept) {
/*
* RDPID causes #UD if disabled through secondary execution controls.
* Because it is marked as EmulateOnUD, we need to intercept it here.
*/
case x86_intercept_rdtscp:
if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
ctxt->exception.vector = UD_VECTOR;
ctxt->exception.error_code_valid = false;
return X86EMUL_PROPAGATE_FAULT;
}
break;

case x86_intercept_in:
case x86_intercept_ins:
case x86_intercept_out:
case x86_intercept_outs:
return vmx_check_intercept_io(vcpu, info);

/* TODO: check more intercepts... */
default:
break;
}

return X86EMUL_UNHANDLEABLE;
}

#ifdef CONFIG_X86_64

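The vmx.c hunks above make emulated IN/OUT instructions consult the same nested I/O-bitmap check that hardware-triggered VM-exits use. As a rough illustration of what such a bitmap lookup involves (not the kernel's code: the helper and its arguments are hypothetical; only the one-bit-per-port layout with a second page for ports 0x8000 and above follows the VMX format):

static bool demo_io_bitmap_intercepts(const unsigned char *bitmap_a,
				      const unsigned char *bitmap_b,
				      unsigned int port, int size)
{
	/* Check every byte the access touches; any set bit traps to L1. */
	while (size--) {
		const unsigned char *bitmap = port < 0x8000 ? bitmap_a : bitmap_b;
		unsigned int p = port & 0x7fff;

		if (bitmap[p / 8] & (1 << (p % 8)))
			return true;
		port = (port + 1) & 0xffff;
	}
	return false;
}
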
@ -54,6 +54,7 @@
#include <linux/pvclock_gtod.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/nospec.h>
#include <trace/events/kvm.h>

#include <asm/debugreg.h>

@ -889,9 +890,11 @@ static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)

static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
size_t size = ARRAY_SIZE(vcpu->arch.db);

switch (dr) {
case 0 ... 3:
vcpu->arch.db[dr] = val;
vcpu->arch.db[array_index_nospec(dr, size)] = val;
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
vcpu->arch.eff_db[dr] = val;
break;

@ -928,9 +931,11 @@ EXPORT_SYMBOL_GPL(kvm_set_dr);

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
size_t size = ARRAY_SIZE(vcpu->arch.db);

switch (dr) {
case 0 ... 3:
*val = vcpu->arch.db[dr];
*val = vcpu->arch.db[array_index_nospec(dr, size)];
break;
case 4:
/* fall through */

@ -2125,7 +2130,10 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
default:
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MCx_CTL(bank_num)) {
u32 offset = msr - MSR_IA32_MC0_CTL;
u32 offset = array_index_nospec(
msr - MSR_IA32_MC0_CTL,
MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);

/* only 0 or all 1s can be written to IA32_MCi_CTL
* some Linux kernels though clear bit 10 in bank 4 to
* workaround a BIOS/GART TBL issue on AMD K8s, ignore

@ -2493,7 +2501,10 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
default:
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MCx_CTL(bank_num)) {
u32 offset = msr - MSR_IA32_MC0_CTL;
u32 offset = array_index_nospec(
msr - MSR_IA32_MC0_CTL,
MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL);

data = vcpu->arch.mce_banks[offset];
break;
}

@ -6121,14 +6132,12 @@ static void kvm_set_mmio_spte_mask(void)
/* Set the present bit. */
mask |= 1ull;

#ifdef CONFIG_X86_64
/*
* If reserved bit is not supported, clear the present bit to disable
* mmio page fault.
*/
if (maxphyaddr == 52)
mask &= ~1ull;
#endif

kvm_mmu_set_mmio_spte_mask(mask);
}

@ -7798,7 +7807,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_mmu_unload(vcpu);
vcpu_put(vcpu);

kvm_x86_ops->vcpu_free(vcpu);
kvm_arch_vcpu_free(vcpu);
}

void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)

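The x86.c hunks above (and the PMU counter lookup at the top of this section) all apply the same Spectre-v1 mitigation: bound a guest-controlled index with array_index_nospec() before it is used to address an array. A minimal sketch of the pattern, using an illustrative structure rather than struct kvm_vcpu:

#include <linux/kernel.h>
#include <linux/nospec.h>

struct demo_state {
	unsigned long db[4];
};

static unsigned long demo_get_db(struct demo_state *s, unsigned int dr)
{
	if (dr >= ARRAY_SIZE(s->db))
		return 0;
	/*
	 * The bounds check above can be bypassed during speculation;
	 * array_index_nospec() forces the index to 0 on that path, so no
	 * out-of-bounds memory is ever loaded.
	 */
	return s->db[array_index_nospec(dr, ARRAY_SIZE(s->db))];
}
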
@ -909,7 +909,7 @@ EndTable

GrpTable: Grp3_2
0: TEST Ev,Iz
1:
1: TEST Ev,Iz
2: NOT Ev
3: NEG Ev
4: MUL rAX,Ev

@ -478,7 +478,6 @@ void __init efi_init(void)
efi_char16_t *c16;
char vendor[100] = "unknown";
int i = 0;
void *tmp;

#ifdef CONFIG_X86_32
if (boot_params.efi_info.efi_systab_hi ||

@ -503,14 +502,16 @@ void __init efi_init(void)
/*
* Show what we know for posterity
*/
c16 = tmp = early_memremap(efi.systab->fw_vendor, 2);
c16 = early_memremap_ro(efi.systab->fw_vendor,
sizeof(vendor) * sizeof(efi_char16_t));
if (c16) {
for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = *c16++;
for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
vendor[i] = c16[i];
vendor[i] = '\0';
} else
early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
} else {
pr_err("Could not map the firmware vendor!\n");
early_memunmap(tmp, 2);
}

pr_info("EFI v%u.%.02u by %s\n",
efi.systab->hdr.revision >> 16,

@ -136,11 +136,13 @@ void af_alg_release_parent(struct sock *sk)
sk = ask->parent;
ask = alg_sk(sk);

lock_sock(sk);
local_bh_disable();
bh_lock_sock(sk);
ask->nokey_refcnt -= nokey;
if (!last)
last = !--ask->refcnt;
release_sock(sk);
bh_unlock_sock(sk);
local_bh_enable();

if (last)
sock_put(sk);

@ -686,10 +686,8 @@ EXPORT_SYMBOL_GPL(crypto_grab_spawn);

void crypto_drop_spawn(struct crypto_spawn *spawn)
{
if (!spawn->alg)
return;

down_write(&crypto_alg_sem);
if (spawn->alg)
list_del(&spawn->list);
up_write(&crypto_alg_sem);
}

@ -698,22 +696,16 @@ EXPORT_SYMBOL_GPL(crypto_drop_spawn);
static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
struct crypto_alg *alg2;

down_read(&crypto_alg_sem);
alg = spawn->alg;
alg2 = alg;
if (alg2)
alg2 = crypto_mod_get(alg2);
if (alg && !crypto_mod_get(alg)) {
alg->cra_flags |= CRYPTO_ALG_DYING;
alg = NULL;
}
up_read(&crypto_alg_sem);

if (!alg2) {
if (alg)
crypto_shoot_alg(alg);
return ERR_PTR(-EAGAIN);
}

return alg;
return alg ?: ERR_PTR(-EAGAIN);
}

struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,

@ -538,7 +538,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
lock_sock(sk);
tx_nents = skcipher_all_sg_nents(ctx);
sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
if (unlikely(!sreq->tsg))
if (unlikely(ZERO_OR_NULL_PTR(sreq->tsg)))
goto unlock;
sg_init_table(sreq->tsg, tx_nents);
memcpy(iv, ctx->iv, ivsize);

@ -356,13 +356,12 @@ static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
return len;
}

void crypto_shoot_alg(struct crypto_alg *alg)
static void crypto_shoot_alg(struct crypto_alg *alg)
{
down_write(&crypto_alg_sem);
alg->cra_flags |= CRYPTO_ALG_DYING;
up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);

struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask)

@ -121,7 +121,6 @@ void crypto_alg_tested(const char *name, int err);
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg);
void crypto_remove_final(struct list_head *list);
void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask);
void *crypto_create_tfm(struct crypto_alg *alg,

@ -130,7 +130,6 @@ static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
struct padata_priv *padata = pcrypt_request_padata(preq);

padata->info = err;
req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

padata_do_serial(padata);
}

@ -505,11 +504,12 @@ err:

static void __exit pcrypt_exit(void)
{
crypto_unregister_template(&pcrypt_tmpl);

pcrypt_fini_padata(&pencrypt);
pcrypt_fini_padata(&pdecrypt);

kset_unregister(pcrypt_kset);
crypto_unregister_template(&pcrypt_tmpl);
}

module_init(pcrypt_init);

@ -129,12 +129,11 @@ void __init acpi_watchdog_init(void)
gas = &entries[i].register_region;

res.start = gas->address;
res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
res.flags = IORESOURCE_MEM;
res.end = res.start + ALIGN(gas->access_width, 4) - 1;
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
res.flags = IORESOURCE_IO;
res.end = res.start + gas->access_width - 1;
} else {
pr_warn("Unsupported address space: %u\n",
gas->space_id);

@ -272,7 +272,7 @@ cleanup:
* FUNCTION: acpi_ds_get_field_names
*
* PARAMETERS: info - create_field info structure
* ` walk_state - Current method state
* walk_state - Current method state
* arg - First parser arg for the field name list
*
* RETURN: Status

@ -440,6 +440,27 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
walk_state));

/*
* Disassembler: handle create field operators here.
*
* create_buffer_field is a deferred op that is typically processed in load
* pass 2. However, disassembly of control method contents walk the parse
* tree with ACPI_PARSE_LOAD_PASS1 and AML_CREATE operators are processed
* in a later walk. This is a problem when there is a control method that
* has the same name as the AML_CREATE object. In this case, any use of the
* name segment will be detected as a method call rather than a reference
* to a buffer field.
*
* This earlier creation during disassembly solves this issue by inserting
* the named object in the ACPI namespace so that references to this name
* would be a name string rather than a method call.
*/
if ((walk_state->parse_flags & ACPI_PARSE_DISASSEMBLE) &&
(walk_state->op_info->flags & AML_CREATE)) {
status = acpi_ds_create_buffer_field(op, walk_state);
return_ACPI_STATUS(status);
}

/* We are only interested in opcodes that have an associated name */

if (!(walk_state->op_info->flags & (AML_NAMED | AML_FIELD))) {

@ -86,6 +86,7 @@ enum board_ids {

static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void ahci_remove_one(struct pci_dev *dev);
static void ahci_shutdown_one(struct pci_dev *dev);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,

@ -582,6 +583,7 @@ static struct pci_driver ahci_pci_driver = {
.id_table = ahci_pci_tbl,
.probe = ahci_init_one,
.remove = ahci_remove_one,
.shutdown = ahci_shutdown_one,
.driver = {
.pm = &ahci_pci_pm_ops,
},

@ -1775,6 +1777,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}

static void ahci_shutdown_one(struct pci_dev *pdev)
{
ata_pci_shutdown_one(pdev);
}

static void ahci_remove_one(struct pci_dev *pdev)
{
pm_runtime_get_noresume(&pdev->dev);

@ -6580,6 +6580,26 @@ void ata_pci_remove_one(struct pci_dev *pdev)
ata_host_detach(host);
}

void ata_pci_shutdown_one(struct pci_dev *pdev)
{
struct ata_host *host = pci_get_drvdata(pdev);
int i;

for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];

ap->pflags |= ATA_PFLAG_FROZEN;

/* Disable port interrupts */
if (ap->ops->freeze)
ap->ops->freeze(ap);

/* Stop the port DMA engines */
if (ap->ops->port_stop)
ap->ops->port_stop(ap);
}
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{

@ -7200,6 +7220,7 @@ EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_shutdown_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);

@ -372,7 +372,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
here = (eni_vcc->descr+skip) & (eni_vcc->words-1);
dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci
<< MID_DMA_VCI_SHIFT) | MID_DT_JK;
j++;
dma[j++] = 0;
}
here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1);
if (!eff) size += skip;

@ -445,7 +445,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
if (size != eff) {
dma[j++] = (here << MID_DMA_COUNT_SHIFT) |
(vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK;
j++;
dma[j++] = 0;
}
if (!j || j > 2*RX_DMA_BUF) {
printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n");

@ -341,7 +341,10 @@ static int really_probe(struct device *dev, struct device_driver *drv)
atomic_inc(&probe_count);
pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
drv->bus->name, __func__, drv->name, dev_name(dev));
WARN_ON(!list_empty(&dev->devres_head));
if (!list_empty(&dev->devres_head)) {
dev_crit(dev, "Resources present before probing\n");
return -EBUSY;
}

re_probe:
dev->driver = drv;

@ -28,6 +28,7 @@
#include <linux/limits.h>
#include <linux/property.h>
#include <linux/kmemleak.h>
#include <linux/types.h>

#include "base.h"
#include "power/power.h"

@ -68,7 +69,7 @@ void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
struct resource *platform_get_resource(struct platform_device *dev,
unsigned int type, unsigned int num)
{
int i;
u32 i;

for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];

@ -153,7 +154,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
unsigned int type,
const char *name)
{
int i;
u32 i;

for (i = 0; i < dev->num_resources; i++) {
struct resource *r = &dev->resource[i];

@ -350,7 +351,8 @@ EXPORT_SYMBOL_GPL(platform_device_add_properties);
*/
int platform_device_add(struct platform_device *pdev)
{
int i, ret;
u32 i;
int ret;

if (!pdev)
return -EINVAL;

@ -416,7 +418,7 @@ int platform_device_add(struct platform_device *pdev)
pdev->id = PLATFORM_DEVID_AUTO;
}

while (--i >= 0) {
while (i--) {
struct resource *r = &pdev->resource[i];
if (r->parent)
release_resource(r);

@ -437,7 +439,7 @@ EXPORT_SYMBOL_GPL(platform_device_add);
*/
void platform_device_del(struct platform_device *pdev)
{
int i;
u32 i;

if (pdev) {
device_remove_properties(&pdev->dev);

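The platform.c hunks above turn the resource index into a u32, and the error-unwind loop changes shape with it: "while (--i >= 0)" can never terminate once i is unsigned, so it becomes "while (i--)". A stand-alone user-space illustration of that loop form (not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int i = 3;

	/*
	 * "while (--i >= 0)" would spin forever for unsigned i. "while (i--)"
	 * visits 2, 1, 0 and stops, because the post-decrement expression
	 * evaluates to 0 (false) on the final test.
	 */
	while (i--)
		printf("releasing resource %u\n", i);

	return 0;
}
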
@ -581,6 +581,25 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
return kobj;
}

static inline void brd_check_and_reset_par(void)
{
if (unlikely(!max_part))
max_part = 1;

/*
* make sure 'max_part' can be divided exactly by (1U << MINORBITS),
* otherwise, it is possiable to get same dev_t when adding partitions.
*/
if ((1U << MINORBITS) % max_part != 0)
max_part = 1UL << fls(max_part);

if (max_part > DISK_MAX_PARTS) {
pr_info("brd: max_part can't be larger than %d, reset max_part = %d.\n",
DISK_MAX_PARTS, DISK_MAX_PARTS);
max_part = DISK_MAX_PARTS;
}
}

static int __init brd_init(void)
{
struct brd_device *brd, *next;

@ -604,8 +623,7 @@ static int __init brd_init(void)
if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
return -EIO;

if (unlikely(!max_part))
max_part = 1;
brd_check_and_reset_par();

for (i = 0; i < rd_nr; i++) {
brd = brd_alloc(i);

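The brd hunk above collects the max_part sanity checks into brd_check_and_reset_par(), rounding any value that does not divide (1U << MINORBITS) up to a power of two with fls(). A small user-space sketch of that rounding, with MINORBITS assumed to be 20 and fls() reimplemented only for the demo:

#include <stdio.h>

/* Demo-only fls(): 1-based index of the highest set bit, fls(0) == 0. */
static int demo_fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int values[] = { 1, 3, 4, 6, 16, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(values) / sizeof(values[0]); i++) {
		unsigned int v = values[i];
		unsigned int rounded = ((1U << 20) % v != 0) ? 1U << demo_fls(v) : v;

		/* 3 -> 4, 6 -> 8, 100 -> 128; exact divisors are kept. */
		printf("max_part %u -> %u\n", v, rounded);
	}
	return 0;
}
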
@ -848,14 +848,17 @@ static void reset_fdc_info(int mode)
/* selects the fdc and drive, and enables the fdc's input/dma. */
static void set_fdc(int drive)
{
unsigned int new_fdc = fdc;

if (drive >= 0 && drive < N_DRIVE) {
fdc = FDC(drive);
new_fdc = FDC(drive);
current_drive = drive;
}
if (fdc != 1 && fdc != 0) {
if (new_fdc >= N_FDC) {
pr_info("bad fdc value\n");
return;
}
fdc = new_fdc;
set_dor(fdc, ~0, 8);
#if N_FDC > 1
set_dor(1 - fdc, ~8, 0);

@ -746,10 +746,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
msg = ssif_info->curr_msg;
if (msg) {
if (data) {
if (len > IPMI_MAX_MSG_LENGTH)
len = IPMI_MAX_MSG_LENGTH;
memcpy(msg->rsp, data, len);
} else {
len = 0;
}
msg->rsp_size = len;
if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
msg->rsp_size = IPMI_MAX_MSG_LENGTH;
memcpy(msg->rsp, data, msg->rsp_size);
ssif_info->curr_msg = NULL;
}

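The ipmi_ssif hunk above moves the length clamp in front of the memcpy(), so an oversized SSIF response is truncated before it can overrun msg->rsp. A hedged, stand-alone sketch of the clamp-before-copy pattern (buffer size and names are invented for the example):

#include <stddef.h>
#include <string.h>

#define DEMO_MAX_MSG_LENGTH 272		/* illustrative, not the driver's constant */

static size_t demo_copy_response(unsigned char rsp[DEMO_MAX_MSG_LENGTH],
				 const unsigned char *data, size_t len)
{
	if (!data)
		return 0;
	if (len > DEMO_MAX_MSG_LENGTH)	/* clamp first ... */
		len = DEMO_MAX_MSG_LENGTH;
	memcpy(rsp, data, len);		/* ... then copy */
	return len;
}
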
@ -18,10 +18,11 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/module.h>
#include <linux/spinlock.h>

struct ttyprintk_port {
struct tty_port port;
struct mutex port_write_mutex;
spinlock_t spinlock;
};

static struct ttyprintk_port tpk_port;

@ -100,11 +101,12 @@ static int tpk_open(struct tty_struct *tty, struct file *filp)
static void tpk_close(struct tty_struct *tty, struct file *filp)
{
struct ttyprintk_port *tpkp = tty->driver_data;
unsigned long flags;

mutex_lock(&tpkp->port_write_mutex);
spin_lock_irqsave(&tpkp->spinlock, flags);
/* flush tpk_printk buffer */
tpk_printk(NULL, 0);
mutex_unlock(&tpkp->port_write_mutex);
spin_unlock_irqrestore(&tpkp->spinlock, flags);

tty_port_close(&tpkp->port, tty, filp);
}

@ -116,13 +118,14 @@ static int tpk_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
struct ttyprintk_port *tpkp = tty->driver_data;
unsigned long flags;
int ret;

/* exclusive use of tpk_printk within this tty */
mutex_lock(&tpkp->port_write_mutex);
spin_lock_irqsave(&tpkp->spinlock, flags);
ret = tpk_printk(buf, count);
mutex_unlock(&tpkp->port_write_mutex);
spin_unlock_irqrestore(&tpkp->spinlock, flags);

return ret;
}

@ -172,7 +175,7 @@ static int __init ttyprintk_init(void)
{
int ret = -ENOMEM;

mutex_init(&tpk_port.port_write_mutex);
spin_lock_init(&tpk_port.spinlock);

ttyprintk_driver = tty_alloc_driver(1,
TTY_DRIVER_RESET_TERMIOS |

@ -134,7 +134,7 @@ static DEFINE_SPINLOCK(ssp3_lock);
static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};

static DEFINE_SPINLOCK(timer_lock);
static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"};
static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"};

static DEFINE_SPINLOCK(reset_lock);

@ -194,6 +194,9 @@ static int _freq_tbl_determine_rate(struct clk_hw *hw,

clk_flags = clk_hw_get_flags(hw);
p = clk_hw_get_parent_by_index(hw, index);
if (!p)
return -EINVAL;

if (clk_flags & CLK_SET_RATE_PARENT) {
if (f->pre_div) {
if (!rate)

@ -797,7 +797,11 @@ static struct tegra_periph_init_data gate_clks[] = {
GATE("vcp", "clk_m", 29, 0, tegra_clk_vcp, 0),
GATE("apbdma", "clk_m", 34, 0, tegra_clk_apbdma, 0),
GATE("kbc", "clk_32k", 36, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_kbc, 0),
GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, 0),
/*
* Critical for RAM re-repair operation, which must occur on resume
* from LP1 system suspend and as part of CCPLEX cluster switching.
*/
GATE("fuse", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse, CLK_IS_CRITICAL),
GATE("fuse_burn", "clk_m", 39, TEGRA_PERIPH_ON_APB, tegra_clk_fuse_burn, 0),
GATE("kfuse", "clk_m", 40, TEGRA_PERIPH_ON_APB, tegra_clk_kfuse, 0),
GATE("apbif", "clk_m", 107, TEGRA_PERIPH_ON_APB, tegra_clk_apbif, 0),

@ -87,7 +87,6 @@
struct atmel_aes_caps {
bool has_dualbuff;
bool has_cfb64;
bool has_ctr32;
bool has_gcm;
u32 max_burst_size;
};

@ -923,8 +922,9 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx);
struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
struct scatterlist *src, *dst;
u32 ctr, blocks;
size_t datalen;
u32 ctr;
u16 blocks, start, end;
bool use_dma, fragmented = false;

/* Check for transfer completion. */

@ -936,27 +936,17 @@ static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd)
datalen = req->nbytes - ctx->offset;
blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE);
ctr = be32_to_cpu(ctx->iv[3]);
if (dd->caps.has_ctr32) {
/* Check 32bit counter overflow. */
u32 start = ctr;
u32 end = start + blocks - 1;

if (end < start) {
ctr |= 0xffffffff;
datalen = AES_BLOCK_SIZE * -start;
fragmented = true;
}
} else {
/* Check 16bit counter overflow. */
u16 start = ctr & 0xffff;
u16 end = start + (u16)blocks - 1;
start = ctr & 0xffff;
end = start + blocks - 1;

if (blocks >> 16 || end < start) {
ctr |= 0xffff;
datalen = AES_BLOCK_SIZE * (0x10000-start);
datalen = AES_BLOCK_SIZE * (0x10000 - start);
fragmented = true;
}
}

use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD);

/* Jump to offset. */

@ -1926,7 +1916,6 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
{
dd->caps.has_dualbuff = 0;
dd->caps.has_cfb64 = 0;
dd->caps.has_ctr32 = 0;
dd->caps.has_gcm = 0;
dd->caps.max_burst_size = 1;

@ -1935,14 +1924,12 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;
case 0x200:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
dd->caps.has_ctr32 = 1;
dd->caps.has_gcm = 1;
dd->caps.max_burst_size = 4;
break;

Some files were not shown because too many files have changed in this diff.