This is the 4.9.87 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlqlSYcACgkQONu9yGCS
 aT4QOBAAlV3X1U64uPZAafv52q/4GVvjh3fymj2aThOImY6cCG2wuQfEHq4lqRKJ
 ua+AY1etuGqJbQlroy8j85bIRtE1pmtW+hAA+vaITcrPJhWzhHmssMCK2aM1Jtim
 +zDXuh4GjXZ+5Gv3p4bbf3otFOgegr2Ftn4dVEmDClkSqRmNEpAtPzSo5TZQCopN
 +QksrFTXMs9QEBxCnfXWHxz0c1EIpaJVtyteX6wVZKC2hjOoF5RBBvErUABW9Po0
 db9+6v488AVouaEqxoLmtcwwUFhetMh/5M4oapD8NeRuwkG+4jFNDZcXIRaCzzyu
 /8DzeyaP1luA/6q0dSI9JD5Whg7FnuZ4A888D+pLHjEQgSgnxhIX38lpsRcrEbl9
 8myFYPoxj6eryM31Z3jW566TI7vT0uqxrc5RzKaFW1xwxLU1Dur5N09VROYIsLvm
 YXUjELN+RwrQa5OkenKbTFy0OPjDHzuo1V/b98T6LujozkyboKAsCUyqE+HukUSe
 Vicg8h5APSPnMSbPLVW5xDDzShBaKhFps92ELsUEZzr5GGS5SX7TBHlbX0bRKs59
 1+hsVts6Twi30/PUJBGkYCAfJ/Jqe0L9l/kj4wnBKWyBLFP5YRerkww2pZ1b/diY
 kwmxH26tSI9g3Nh4oeUB3j5MKu23XhDucB0r2H3pLqfTqFcoT3M=
 =Y/Vz
 -----END PGP SIGNATURE-----

Merge 4.9.87 into android-4.9

Changes in 4.9.87
	tpm: st33zp24: fix potential buffer overruns caused by bit glitches on the bus
	tpm_i2c_infineon: fix potential buffer overruns caused by bit glitches on the bus
	tpm_i2c_nuvoton: fix potential buffer overruns caused by bit glitches on the bus
	tpm_tis: fix potential buffer overruns caused by bit glitches on the bus
	tpm: constify transmit data pointers
	tpm_tis_spi: Use DMA-safe memory for SPI transfers
	tpm-dev-common: Reject too short writes
	ALSA: usb-audio: Add a quirk for B&W PX headphones
	ALSA: hda: Add a power_save blacklist
	ALSA: hda - Fix pincfg at resume on Lenovo T470 dock
	timers: Forward timer base before migrating timers
	parisc: Fix ordering of cache and TLB flushes
	cpufreq: s3c24xx: Fix broken s3c_cpufreq_init()
	dax: fix vma_is_fsdax() helper
	x86/xen: Zero MSR_IA32_SPEC_CTRL before suspend
	x86/platform/intel-mid: Handle Intel Edison reboot correctly
	media: m88ds3103: don't call a non-initialized function
	nospec: Allow index argument to have const-qualified type
	ARM: mvebu: Fix broken PL310_ERRATA_753970 selects
	ARM: kvm: fix building with gcc-8
	KVM: mmu: Fix overlap between public and private memslots
	KVM/x86: Remove indirect MSR op calls from SPEC_CTRL
	KVM/VMX: Optimize vmx_vcpu_run() and svm_vcpu_run() by marking the RDMSR path as unlikely()
	PCI/ASPM: Deal with missing root ports in link state handling
	dm io: fix duplicate bio completion due to missing ref count
	ARM: dts: LogicPD SOM-LV: Fix I2C1 pinmux
	ARM: dts: LogicPD Torpedo: Fix I2C1 pinmux
	x86/mm: Give each mm TLB flush generation a unique ID
	x86/speculation: Use Indirect Branch Prediction Barrier in context switch
	md: only allow remove_and_add_spares when no sync_thread running.
	netlink: put module reference if dump start fails
	x86/apic/vector: Handle legacy irq data correctly
	bridge: check brport attr show in brport_show
	fib_semantics: Don't match route with mismatching tclassid
	hdlc_ppp: carrier detect ok, don't turn off negotiation
	ipv6 sit: work around bogus gcc-8 -Wrestrict warning
	net: fix race on decreasing number of TX queues
	net: ipv4: don't allow setting net.ipv4.route.min_pmtu below 68
	netlink: ensure to loop over all netns in genlmsg_multicast_allns()
	ppp: prevent unregistered channels from connecting to PPP units
	udplite: fix partial checksum initialization
	sctp: fix dst refcnt leak in sctp_v4_get_dst
	mlxsw: spectrum_switchdev: Check success of FDB add operation
	net: phy: fix phy_start to consider PHY_IGNORE_INTERRUPT
	tcp: Honor the eor bit in tcp_mtu_probe
	rxrpc: Fix send in rxrpc_send_data_packet()
	tcp_bbr: better deal with suboptimal GSO
	sctp: fix dst refcnt leak in sctp_v6_get_dst()
	s390/qeth: fix underestimated count of buffer elements
	s390/qeth: fix SETIP command handling
	s390/qeth: fix overestimated count of buffer elements
	s390/qeth: fix IP removal on offline cards
	s390/qeth: fix double-free on IP add/remove race
	s390/qeth: fix IP address lookup for L3 devices
	s390/qeth: fix IPA command submission race
	sctp: verify size of a new chunk in _sctp_make_chunk()
	net: mpls: Pull common label check into helper
	mpls, nospec: Sanitize array index in mpls_label_ok()
	bpf: fix wrong exposure of map_flags into fdinfo for lpm
	bpf: fix mlock precharge on arraymaps
	bpf, x64: implement retpoline for tail call
	bpf, arm64: fix out of bounds access in tail call
	bpf: add schedule points in percpu arrays management
	bpf, ppc64: fix out of bounds access in tail call
	btrfs: preserve i_mode if __btrfs_set_acl() fails
	Linux 4.9.87

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit a2904940bd
Author: Greg Kroah-Hartman
Date:   2018-03-11 17:38:31 +01:00

69 changed files with 687 additions and 244 deletions


@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 86
+SUBLEVEL = 87
 EXTRAVERSION =
 NAME = Roaring Lionus


@@ -97,6 +97,8 @@
 };
 
 &i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins>;
 	clock-frequency = <2600000>;
 
 	twl: twl@48 {
@@ -215,7 +217,12 @@
 		>;
 	};
+	i2c1_pins: pinmux_i2c1_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0)	/* i2c1_scl.i2c1_scl */
+			OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0)	/* i2c1_sda.i2c1_sda */
+		>;
+	};
 };
 
 &omap3_pmx_wkup {


@@ -100,6 +100,8 @@
 };
 
 &i2c1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&i2c1_pins>;
 	clock-frequency = <2600000>;
 
 	twl: twl@48 {
@@ -207,6 +209,12 @@
 			OMAP3_CORE1_IOPAD(0x21b8, PIN_INPUT | MUX_MODE0)	/* hsusb0_data7.hsusb0_data7 */
 		>;
 	};
+	i2c1_pins: pinmux_i2c1_pins {
+		pinctrl-single,pins = <
+			OMAP3_CORE1_IOPAD(0x21ba, PIN_INPUT | MUX_MODE0)	/* i2c1_scl.i2c1_scl */
+			OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0)	/* i2c1_sda.i2c1_sda */
+		>;
+	};
 };
 
 &uart2 {


@@ -6,6 +6,8 @@ ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING
 
 KVM=../../../../virt/kvm
 
+CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)
+
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
@@ -14,7 +16,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
 obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
 obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)
+
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
 obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o


@@ -20,6 +20,10 @@
 
 #include <asm/kvm_hyp.h>
 
+/*
+ * gcc before 4.9 doesn't understand -march=armv7ve, so we have to
+ * trick the assembler.
+ */
 __asm__(".arch_extension	virt");
 
 void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)


@@ -42,7 +42,7 @@ config MACH_ARMADA_375
 	depends on ARCH_MULTI_V7
 	select ARMADA_370_XP_IRQ
 	select ARM_ERRATA_720789
-	select ARM_ERRATA_753970
+	select PL310_ERRATA_753970
 	select ARM_GIC
 	select ARMADA_375_CLK
 	select HAVE_ARM_SCU
@@ -58,7 +58,7 @@ config MACH_ARMADA_38X
 	bool "Marvell Armada 380/385 boards"
 	depends on ARCH_MULTI_V7
 	select ARM_ERRATA_720789
-	select ARM_ERRATA_753970
+	select PL310_ERRATA_753970
 	select ARM_GIC
 	select ARMADA_370_XP_IRQ
 	select ARMADA_38X_CLK


@@ -234,8 +234,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	off = offsetof(struct bpf_array, map.max_entries);
 	emit_a64_mov_i64(tmp, off, ctx);
 	emit(A64_LDR32(tmp, r2, tmp), ctx);
+	emit(A64_MOV(0, r3, r3), ctx);
 	emit(A64_CMP(0, r3, tmp), ctx);
-	emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
+	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
 
 	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
 	 * goto out;
@@ -243,7 +244,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	 */
 	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
 	emit(A64_CMP(1, tcc, tmp), ctx);
-	emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
+	emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
 	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
 
 	/* prog = array->ptrs[index];
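The arm64 change above is subtle: the tail-call index register is only defined in its low 32 bits, and the old A64_COND_GE/A64_COND_GT branches compare signed. A minimal illustration of why the bounds check must zero-extend and compare unsigned (plain C with illustrative values, not the JIT itself):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t max_entries = 4;
            uint64_t index = 0xffffffff80000000ULL; /* garbage in the upper bits */

            /* old check (signed, full register): the "negative" index slips past */
            assert(!((int64_t)index >= (int64_t)max_entries));
            /* new check (zero-extend, then unsigned A64_COND_CS): it is rejected */
            assert((uint32_t)index >= max_entries);
            return 0;
    }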


@@ -25,6 +25,7 @@ void flush_user_icache_range_asm(unsigned long, unsigned long);
 void flush_kernel_icache_range_asm(unsigned long, unsigned long);
 void flush_user_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
+void purge_kernel_dcache_range_asm(unsigned long, unsigned long);
 void flush_kernel_dcache_page_asm(void *);
 void flush_kernel_icache_page(void *);
 void flush_user_dcache_range(unsigned long, unsigned long);


@@ -464,10 +464,10 @@ EXPORT_SYMBOL(copy_user_page);
 int __flush_tlb_range(unsigned long sid, unsigned long start,
 		      unsigned long end)
 {
-	unsigned long flags, size;
+	unsigned long flags;
 
-	size = (end - start);
-	if (size >= parisc_tlb_flush_threshold) {
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    end - start >= parisc_tlb_flush_threshold) {
 		flush_tlb_all();
 		return 1;
 	}
@@ -538,13 +538,11 @@ void flush_cache_mm(struct mm_struct *mm)
 	struct vm_area_struct *vma;
 	pgd_t *pgd;
 
-	/* Flush the TLB to avoid speculation if coherency is required. */
-	if (parisc_requires_coherency())
-		flush_tlb_all();
-
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
-	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
+		flush_tlb_all();
 		flush_cache_all();
 		return;
 	}
@@ -552,9 +550,9 @@ void flush_cache_mm(struct mm_struct *mm)
 	if (mm->context == mfsp(3)) {
 		for (vma = mm->mmap; vma; vma = vma->vm_next) {
 			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
-			if ((vma->vm_flags & VM_EXEC) == 0)
-				continue;
-			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+			if (vma->vm_flags & VM_EXEC)
+				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
 		}
 		return;
 	}
@@ -598,14 +596,9 @@ flush_user_icache_range(unsigned long start, unsigned long end)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
-	BUG_ON(!vma->vm_mm->context);
-
-	/* Flush the TLB to avoid speculation if coherency is required. */
-	if (parisc_requires_coherency())
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    end - start >= parisc_cache_flush_threshold) {
 		flush_tlb_range(vma, start, end);
-
-	if ((end - start) >= parisc_cache_flush_threshold
-	    || vma->vm_mm->context != mfsp(3)) {
 		flush_cache_all();
 		return;
 	}
@@ -613,6 +606,7 @@ void flush_cache_range(struct vm_area_struct *vma,
 	flush_user_dcache_range_asm(start, end);
 	if (vma->vm_flags & VM_EXEC)
 		flush_user_icache_range_asm(start, end);
+	flush_tlb_range(vma, start, end);
 }
 
 void
@@ -621,7 +615,6 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 	BUG_ON(!vma->vm_mm->context);
 
 	if (pfn_valid(pfn)) {
-		if (parisc_requires_coherency())
-			flush_tlb_page(vma, vmaddr);
+		flush_tlb_page(vma, vmaddr);
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
@@ -630,21 +623,33 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 void flush_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
 
-	if ((unsigned long)size > parisc_cache_flush_threshold)
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    (unsigned long)size >= parisc_cache_flush_threshold) {
+		flush_tlb_kernel_range(start, end);
 		flush_data_cache();
-	else
-		flush_kernel_dcache_range_asm(start, start + size);
+		return;
+	}
+
+	flush_kernel_dcache_range_asm(start, end);
+	flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(flush_kernel_vmap_range);
 
 void invalidate_kernel_vmap_range(void *vaddr, int size)
 {
 	unsigned long start = (unsigned long)vaddr;
+	unsigned long end = start + size;
 
-	if ((unsigned long)size > parisc_cache_flush_threshold)
+	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
+	    (unsigned long)size >= parisc_cache_flush_threshold) {
+		flush_tlb_kernel_range(start, end);
 		flush_data_cache();
-	else
-		flush_kernel_dcache_range_asm(start, start + size);
+		return;
+	}
+
+	purge_kernel_dcache_range_asm(start, end);
+	flush_tlb_kernel_range(start, end);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
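All of the parisc hunks above converge on one predicate: do the whole-cache/TLB flush only when the range beats a boot-time calibrated threshold, and never attempt the global flush on SMP with IRQs disabled, since it may need to interrupt other CPUs. A hedged sketch of that predicate, with illustrative names standing in for parisc_cache_flush_threshold/parisc_tlb_flush_threshold:

    #include <stdbool.h>

    static bool use_full_flush(unsigned long start, unsigned long end,
                               unsigned long threshold,
                               bool smp, bool irqs_disabled)
    {
            return (!smp || !irqs_disabled) && (end - start >= threshold);
    }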


@@ -1110,6 +1110,28 @@ ENTRY_CFI(flush_kernel_dcache_range_asm)
 	.procend
 ENDPROC_CFI(flush_kernel_dcache_range_asm)
 
+ENTRY_CFI(purge_kernel_dcache_range_asm)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	ldil		L%dcache_stride, %r1
+	ldw		R%dcache_stride(%r1), %r23
+	ldo		-1(%r23), %r21
+	ANDCM		%r26, %r21, %r26
+
+1:	cmpb,COND(<<),n	%r26, %r25,1b
+	pdc,m		%r23(%r26)
+
+	sync
+	syncdma
+	bv		%r0(%r2)
+	nop
+	.exit
+
+	.procend
+ENDPROC_CFI(purge_kernel_dcache_range_asm)
+
 ENTRY_CFI(flush_user_icache_range_asm)
 	.proc
 	.callinfo NO_CALLS


@@ -245,6 +245,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
 	 * goto out;
 	 */
 	PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
+	PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
 	PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
 	PPC_BCC(COND_GE, out);


@@ -3,12 +3,18 @@
 
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/atomic.h>
 
 /*
- * The x86 doesn't have a mmu context, but
- * we put the segment information here.
+ * x86 has arch-specific MMU state beyond what lives in mm_struct.
  */
 typedef struct {
+	/*
+	 * ctx_id uniquely identifies this mm_struct.  A ctx_id will never
+	 * be reused, and zero is not a valid ctx_id.
	 */
+	u64 ctx_id;
+
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	struct ldt_struct *ldt;
 #endif
@@ -33,6 +39,11 @@ typedef struct {
 #endif
 } mm_context_t;
 
+#define INIT_MM_CONTEXT(mm)						\
+	.context = {							\
+		.ctx_id = 1,						\
+	}
+
 void leave_mm(int cpu);
 
 #endif /* _ASM_X86_MMU_H */
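The ctx_id introduced here is the anchor for the IBPB optimization further down: IDs are drawn from a global 64-bit counter seeded at 1 (init_mm keeps 1 for itself via INIT_MM_CONTEXT), so they are never zero and never reused, and an unchanged ctx_id reliably means "same mm as last time". A user-space sketch of the allocation, assuming C11 atomics:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint64_t last_mm_ctx_id = 1;

    static uint64_t alloc_ctx_id(void)
    {
            /* equivalent of the kernel's atomic64_inc_return() */
            return atomic_fetch_add(&last_mm_ctx_id, 1) + 1;
    }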


@@ -12,6 +12,9 @@
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/mpx.h>
+
+extern atomic64_t last_mm_ctx_id;
+
 #ifndef CONFIG_PARAVIRT
 static inline void paravirt_activate_mm(struct mm_struct *prev,
 					struct mm_struct *next)
@@ -106,6 +109,8 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
+
 	#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
 		/* pkey 0 is the default and always allocated */


@@ -177,4 +177,41 @@ static inline void indirect_branch_prediction_barrier(void)
 }
 
 #endif /* __ASSEMBLY__ */
+
+/*
+ * Below is used in the eBPF JIT compiler and emits the byte sequence
+ * for the following assembly:
+ *
+ * With retpolines configured:
+ *
+ *    callq do_rop
+ *  spec_trap:
+ *    pause
+ *    lfence
+ *    jmp spec_trap
+ *  do_rop:
+ *    mov %rax,(%rsp)
+ *    retq
+ *
+ * Without retpolines configured:
+ *
+ *    jmp *%rax
+ */
+#ifdef CONFIG_RETPOLINE
+# define RETPOLINE_RAX_BPF_JIT_SIZE	17
+# define RETPOLINE_RAX_BPF_JIT()				\
+	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
+	/* spec_trap: */					\
+	EMIT2(0xF3, 0x90);       /* pause */			\
+	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
+	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
+	/* do_rop: */						\
+	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
+	EMIT1(0xC3);             /* retq */
+#else
+# define RETPOLINE_RAX_BPF_JIT_SIZE	2
+# define RETPOLINE_RAX_BPF_JIT()				\
+	EMIT2(0xFF, 0xE0);	 /* jmp *%rax */
+#endif
+
 #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */


@@ -68,6 +68,8 @@ static inline void invpcid_flush_all_nonglobals(void)
 struct tlb_state {
 	struct mm_struct *active_mm;
 	int state;
+	/* last user mm's ctx id */
+	u64 last_ctx_id;
 
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by


@@ -93,8 +93,12 @@ out_data:
 	return NULL;
 }
 
-static void free_apic_chip_data(struct apic_chip_data *data)
+static void free_apic_chip_data(unsigned int virq, struct apic_chip_data *data)
 {
+#ifdef CONFIG_X86_IO_APIC
+	if (virq < nr_legacy_irqs())
+		legacy_irq_data[virq] = NULL;
+#endif
 	if (data) {
 		free_cpumask_var(data->domain);
 		free_cpumask_var(data->old_domain);
@@ -318,11 +322,7 @@ static void x86_vector_free_irqs(struct irq_domain *domain,
 			apic_data = irq_data->chip_data;
 			irq_domain_reset_irq_data(irq_data);
 			raw_spin_unlock_irqrestore(&vector_lock, flags);
-			free_apic_chip_data(apic_data);
-#ifdef CONFIG_X86_IO_APIC
-			if (virq + i < nr_legacy_irqs())
-				legacy_irq_data[virq + i] = NULL;
-#endif
+			free_apic_chip_data(virq + i, apic_data);
 		}
 	}
 }
@@ -363,7 +363,7 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		err = assign_irq_vector_policy(virq + i, node, data, info);
 		if (err) {
 			irq_data->chip_data = NULL;
-			free_apic_chip_data(data);
+			free_apic_chip_data(virq + i, data);
 			goto error;
 		}
 	}


@@ -44,6 +44,7 @@
 #include <asm/debugreg.h>
 #include <asm/kvm_para.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 #include <asm/virtext.h>
 
@@ -4919,7 +4920,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5028,11 +5029,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (svm->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();


@@ -49,6 +49,7 @@
 #include <asm/kexec.h>
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
+#include <asm/microcode.h>
 #include <asm/nospec-branch.h>
 
 #include "trace.h"
@@ -8905,7 +8906,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * being speculatively taken.
 	 */
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
@@ -9040,11 +9041,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * If the L02 MSR bitmap does not intercept the MSR, then we need to
 	 * save it.
 	 */
-	if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
-		rdmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
+		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
 	if (vmx->spec_ctrl)
-		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();


@@ -10,6 +10,7 @@
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
+#include <asm/nospec-branch.h>
 #include <asm/cache.h>
 #include <asm/apic.h>
 #include <asm/uv/uv.h>
@@ -29,6 +30,8 @@
 *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
 */
 
+atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
+
 struct flush_tlb_info {
 	struct mm_struct *flush_mm;
 	unsigned long flush_start;
@@ -104,6 +107,28 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 	unsigned cpu = smp_processor_id();
 
 	if (likely(prev != next)) {
+		u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
+
+		/*
+		 * Avoid user/user BTB poisoning by flushing the branch
+		 * predictor when switching between processes. This stops
+		 * one process from doing Spectre-v2 attacks on another.
+		 *
+		 * As an optimization, flush indirect branches only when
+		 * switching into processes that disable dumping. This
+		 * protects high value processes like gpg, without having
+		 * too high performance overhead. IBPB is *expensive*!
+		 *
+		 * This will not flush branches when switching into kernel
+		 * threads. It will also not flush if we switch to idle
+		 * thread and back to the same process. It will flush if we
+		 * switch to a different non-dumpable process.
+		 */
+		if (tsk && tsk->mm &&
+		    tsk->mm->context.ctx_id != last_ctx_id &&
+		    get_dumpable(tsk->mm) != SUID_DUMP_USER)
+			indirect_branch_prediction_barrier();
+
 		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
 			/*
 			 * If our current stack is in vmalloc space and isn't
@@ -118,6 +143,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 				set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
 		}
 
+		/*
+		 * Record last user mm's context id, so we can avoid
+		 * flushing branch buffer with IBPB if we switch back
+		 * to the same user.
+		 */
+		if (next != &init_mm)
+			this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
 		this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
 		this_cpu_write(cpu_tlbstate.active_mm, next);
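The long comment in switch_mm_irqs_off() boils down to a small predicate: issue the expensive barrier only when entering a different user mm whose owner disabled dumping. A condensed sketch (illustrative signature, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    static bool need_ibpb(bool has_user_mm, uint64_t next_ctx_id,
                          uint64_t last_ctx_id, bool dumpable)
    {
            return has_user_mm && next_ctx_id != last_ctx_id && !dumpable;
    }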


@@ -12,6 +12,7 @@
 #include <linux/filter.h>
 #include <linux/if_vlan.h>
 #include <asm/cacheflush.h>
+#include <asm/nospec-branch.h>
 #include <linux/bpf.h>
 
 int bpf_jit_enable __read_mostly;
@@ -281,7 +282,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	EMIT2(0x89, 0xD2);                        /* mov edx, edx */
 	EMIT3(0x39, 0x56,                         /* cmp dword ptr [rsi + 16], edx */
 	      offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 43 /* number of bytes to jump */
+#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
 	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
 	label1 = cnt;
@@ -290,7 +291,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 */
 	EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
 	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 32
+#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
 	EMIT2(X86_JA, OFFSET2);                   /* ja out */
 	label2 = cnt;
 	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
@@ -304,7 +305,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 * goto out;
 	 */
 	EMIT3(0x48, 0x85, 0xC0);                  /* test rax,rax */
-#define OFFSET3 10
+#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
 	EMIT2(X86_JE, OFFSET3);                   /* je out */
 	label3 = cnt;
@@ -317,7 +318,7 @@ static void emit_bpf_tail_call(u8 **pprog)
 	 * rdi == ctx (1st arg)
 	 * rax == prog->bpf_func + prologue_size
 	 */
-	EMIT2(0xFF, 0xE0);                        /* jmp rax */
+	RETPOLINE_RAX_BPF_JIT();
 
 	/* out: */
 	BUILD_BUG_ON(cnt - label1 != OFFSET1);
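The OFFSETn edits are pure byte accounting: each short jump to the out label must skip everything emitted in between, and the final indirect jump grew from 2 bytes (jmp *%rax) to 17 (the retpoline). A small check of the arithmetic, using the sizes defined in nospec-branch.h above:

    #include <assert.h>

    int main(void)
    {
            int plain = 2, retpoline = 17;  /* RETPOLINE_RAX_BPF_JIT_SIZE */

            /* the old literals already contained the 2-byte jmp */
            assert(41 + plain == 43);       /* OFFSET1 */
            assert(30 + plain == 32);       /* OFFSET2 */
            assert(8 + plain == 10);        /* OFFSET3 */
            assert(41 + retpoline == 58);   /* OFFSET1 with retpolines on */
            return 0;
    }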


@@ -79,7 +79,7 @@ static void intel_mid_power_off(void)
 
 static void intel_mid_reboot(void)
 {
-	intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+	intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
 }
 
 static unsigned long __init intel_mid_calibrate_tsc(void)


@@ -1,11 +1,14 @@
 #include <linux/types.h>
 #include <linux/tick.h>
+#include <linux/percpu-defs.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/grant_table.h>
 #include <xen/events.h>
 
+#include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
 #include <asm/fixmap.h>
@@ -68,6 +71,8 @@ static void xen_pv_post_suspend(int suspend_cancelled)
 		xen_mm_unpin_all();
 }
 
+static DEFINE_PER_CPU(u64, spec_ctrl);
+
 void xen_arch_pre_suspend(void)
 {
 	if (xen_pv_domain())
@@ -84,6 +89,9 @@ void xen_arch_post_suspend(int cancelled)
 
 static void xen_vcpu_notify_restore(void *data)
 {
+	if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+		wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+
 	/* Boot processor notified via generic timekeeping_resume() */
 	if (smp_processor_id() == 0)
 		return;
@@ -93,7 +101,15 @@ static void xen_vcpu_notify_restore(void *data)
 
 static void xen_vcpu_notify_suspend(void *data)
 {
+	u64 tmp;
+
 	tick_suspend_local();
+
+	if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
+		rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+		this_cpu_write(spec_ctrl, tmp);
+		wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+	}
 }
 
 void xen_arch_resume(void)


@@ -458,7 +458,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
 			 size_t count)
 {
 	int size = 0;
-	int expected;
+	u32 expected;
 
 	if (!chip)
 		return -EBUSY;
@@ -475,7 +475,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf,
 	}
 
 	expected = be32_to_cpu(*(__be32 *)(buf + 2));
-	if (expected > count) {
+	if (expected > count || expected < TPM_HEADER_SIZE) {
 		size = -EIO;
 		goto out;
 	}
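The same hardening recurs in the tpm_i2c_infineon, tpm_i2c_nuvoton and tpm_tis hunks below: 'expected' is parsed out of data that crossed a glitch-prone bus, so it becomes a u32 (be32_to_cpu() can no longer yield a negative int) and is range-checked before anything else is copied. A sketch of the accepted window, with illustrative names:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define TPM_HEADER_SIZE 10

    static bool tpm_response_len_ok(uint32_t expected, size_t count)
    {
            /* reject lengths that would underrun the header or overrun buf */
            return expected >= TPM_HEADER_SIZE && expected <= count;
    }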


@@ -136,6 +136,12 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
 		return -EFAULT;
 	}
 
+	if (in_size < 6 ||
+	    in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
+		mutex_unlock(&priv->buffer_mutex);
+		return -EINVAL;
+	}
+
 	/* atomic tpm command send and result receive. We only hold the ops
 	 * lock during this period so that the tpm can be unregistered even if
 	 * the char dev is held open.
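In the write direction the check is the mirror image: don't trust the command's self-declared length until at least the tag (2 bytes) and length field (4 bytes) are present, and never let it claim more than the caller actually wrote. A user-space sketch of the same validation (names illustrative):

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static bool tpm_command_len_ok(const uint8_t *buf, size_t in_size)
    {
            uint32_t claimed;

            if (in_size < 6)
                    return false;
            memcpy(&claimed, buf + 2, sizeof(claimed)); /* big-endian length */
            return in_size >= ntohl(claimed);
    }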


@@ -437,7 +437,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
 static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
 	int size = 0;
-	int expected, status;
+	int status;
+	u32 expected;
 
 	if (count < TPM_HEADER_SIZE) {
 		size = -EIO;
@@ -452,7 +453,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 	}
 
 	expected = be32_to_cpu(*(__be32 *)(buf + 2));
-	if ((size_t) expected > count) {
+	if (((size_t) expected > count) || (expected < TPM_HEADER_SIZE)) {
 		size = -EIO;
 		goto out;
 	}


@@ -281,7 +281,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 	struct device *dev = chip->dev.parent;
 	struct i2c_client *client = to_i2c_client(dev);
 	s32 rc;
-	int expected, status, burst_count, retries, size = 0;
+	int status;
+	int burst_count;
+	int retries;
+	int size = 0;
+	u32 expected;
 
 	if (count < TPM_HEADER_SIZE) {
 		i2c_nuvoton_ready(chip);    /* return to idle */
@@ -323,7 +327,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 		 * to machine native
 		 */
 		expected = be32_to_cpu(*(__be32 *) (buf + 2));
-		if (expected > count) {
+		if (expected > count || expected < size) {
 			dev_err(dev, "%s() expected > count\n", __func__);
 			size = -EIO;
 			continue;


@@ -98,7 +98,7 @@ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
 }
 
 static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len,
-			       u8 *value)
+			       const u8 *value)
 {
 	struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data);


@@ -208,7 +208,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 {
 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
 	int size = 0;
-	int expected, status;
+	int status;
+	u32 expected;
 
 	if (count < TPM_HEADER_SIZE) {
 		size = -EIO;
@@ -223,7 +224,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 	}
 
 	expected = be32_to_cpu(*(__be32 *) (buf + 2));
-	if (expected > count) {
+	if (expected > count || expected < TPM_HEADER_SIZE) {
 		size = -EIO;
 		goto out;
 	}
@@ -256,7 +257,7 @@ out:
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
-static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len)
 {
 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
 	int rc, status, burstcnt;
@@ -345,7 +346,7 @@ static void disable_interrupts(struct tpm_chip *chip)
 * tpm.c can skip polling for the data to be available as the interrupt is
 * waited for here
 */
-static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len)
 {
 	struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
 	int rc;


@@ -98,7 +98,7 @@ struct tpm_tis_phy_ops {
 	int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
 			  u8 *result);
 	int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len,
-			   u8 *value);
+			   const u8 *value);
 	int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result);
 	int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result);
 	int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src);
@@ -128,7 +128,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr,
 }
 
 static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr,
-				      u16 len, u8 *value)
+				      u16 len, const u8 *value)
 {
 	return data->phy_ops->write_bytes(data, addr, len, value);
 }


@@ -47,9 +47,7 @@
 struct tpm_tis_spi_phy {
 	struct tpm_tis_data priv;
 	struct spi_device *spi_device;
-
-	u8 tx_buf[4];
-	u8 rx_buf[4];
+	u8 *iobuf;
 };
 
@@ -58,7 +56,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da
 }
 
 static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
-				u8 *buffer, u8 direction)
+				u8 *in, const u8 *out)
 {
 	struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data);
 	int ret = 0;
@@ -72,14 +70,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 	while (len) {
 		transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE);
 
-		phy->tx_buf[0] = direction | (transfer_len - 1);
-		phy->tx_buf[1] = 0xd4;
-		phy->tx_buf[2] = addr >> 8;
-		phy->tx_buf[3] = addr;
+		phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1);
+		phy->iobuf[1] = 0xd4;
+		phy->iobuf[2] = addr >> 8;
+		phy->iobuf[3] = addr;
 
 		memset(&spi_xfer, 0, sizeof(spi_xfer));
-		spi_xfer.tx_buf = phy->tx_buf;
-		spi_xfer.rx_buf = phy->rx_buf;
+		spi_xfer.tx_buf = phy->iobuf;
+		spi_xfer.rx_buf = phy->iobuf;
 		spi_xfer.len = 4;
 		spi_xfer.cs_change = 1;
@@ -89,9 +87,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 		if (ret < 0)
 			goto exit;
 
-		if ((phy->rx_buf[3] & 0x01) == 0) {
+		if ((phy->iobuf[3] & 0x01) == 0) {
 			// handle SPI wait states
-			phy->tx_buf[0] = 0;
+			phy->iobuf[0] = 0;
 
 			for (i = 0; i < TPM_RETRY; i++) {
 				spi_xfer.len = 1;
@@ -100,7 +98,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 				ret = spi_sync_locked(phy->spi_device, &m);
 				if (ret < 0)
 					goto exit;
-				if (phy->rx_buf[0] & 0x01)
+				if (phy->iobuf[0] & 0x01)
 					break;
 			}
@@ -114,12 +112,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 		spi_xfer.len = transfer_len;
 		spi_xfer.delay_usecs = 5;
 
-		if (direction) {
+		if (in) {
 			spi_xfer.tx_buf = NULL;
-			spi_xfer.rx_buf = buffer;
-		} else {
-			spi_xfer.tx_buf = buffer;
+		} else if (out) {
 			spi_xfer.rx_buf = NULL;
+			memcpy(phy->iobuf, out, transfer_len);
+			out += transfer_len;
 		}
 
 		spi_message_init(&m);
@@ -128,8 +126,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
 		if (ret < 0)
 			goto exit;
 
+		if (in) {
+			memcpy(in, phy->iobuf, transfer_len);
+			in += transfer_len;
+		}
+
 		len -= transfer_len;
-		buffer += transfer_len;
 	}
 
 exit:
@@ -140,13 +142,13 @@ exit:
 static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr,
 				  u16 len, u8 *result)
 {
-	return tpm_tis_spi_transfer(data, addr, len, result, 0x80);
+	return tpm_tis_spi_transfer(data, addr, len, result, NULL);
 }
 
 static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr,
-				   u16 len, u8 *value)
+				   u16 len, const u8 *value)
 {
-	return tpm_tis_spi_transfer(data, addr, len, value, 0);
+	return tpm_tis_spi_transfer(data, addr, len, NULL, value);
 }
 
 static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result)
@@ -195,6 +197,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
 
 	phy->spi_device = dev;
 
+	phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL);
+	if (!phy->iobuf)
+		return -ENOMEM;
+
 	return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
 				 NULL);
 }


@@ -351,7 +351,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
 static int s3c_cpufreq_init(struct cpufreq_policy *policy)
 {
 	policy->clk = clk_arm;
-	return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency);
+
+	policy->cpuinfo.transition_latency = cpu_cur.info->latency;
+
+	if (ftab)
+		return cpufreq_table_validate_and_show(policy, ftab);
+
+	return 0;
 }
 
 static int __init s3c_cpufreq_initclks(void)


@@ -302,6 +302,7 @@ static void do_region(int op, int op_flags, unsigned region,
 		special_cmd_max_sectors = q->limits.max_write_same_sectors;
 	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
 	    special_cmd_max_sectors == 0) {
+		atomic_inc(&io->count);
 		dec_count(io, region, -EOPNOTSUPP);
 		return;
 	}


@@ -8224,6 +8224,10 @@ static int remove_and_add_spares(struct mddev *mddev,
 	int removed = 0;
 	bool remove_some = false;
 
+	if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+		/* Mustn't remove devices when resync thread is running */
+		return 0;
+
 	rdev_for_each(rdev, mddev) {
 		if ((this == NULL || rdev == this) &&
 		    rdev->raid_disk >= 0 &&


@@ -1262,11 +1262,12 @@ static int m88ds3103_select(struct i2c_mux_core *muxc, u32 chan)
 * New users must use I2C client binding directly!
 */
 struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg,
-		struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter)
+				      struct i2c_adapter *i2c,
+				      struct i2c_adapter **tuner_i2c_adapter)
 {
 	struct i2c_client *client;
 	struct i2c_board_info board_info;
-	struct m88ds3103_platform_data pdata;
+	struct m88ds3103_platform_data pdata = {};
 
 	pdata.clk = cfg->clock;
 	pdata.i2c_wr_max = cfg->i2c_wr_max;
@@ -1409,6 +1410,8 @@ static int m88ds3103_probe(struct i2c_client *client,
 	case M88DS3103_CHIP_ID:
 		break;
 	default:
+		ret = -ENODEV;
+		dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id);
 		goto err_kfree;
 	}


@@ -809,6 +809,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 				     bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -818,9 +819,16 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 			      mac, fid, action, local_port);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
 
+out:
+	kfree(sfd_pl);
 	return err;
 }
@@ -845,6 +853,7 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 				       bool adding, bool dynamic)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -855,9 +864,16 @@ static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
 	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
 				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
 				  lag_vid, lag_id);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
-	kfree(sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
 
+out:
+	kfree(sfd_pl);
 	return err;
 }
@@ -891,6 +907,7 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 				u16 fid, u16 mid, bool adding)
 {
 	char *sfd_pl;
+	u8 num_rec;
 	int err;
 
 	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
@@ -900,7 +917,15 @@ static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
 	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
 	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
 			      MLXSW_REG_SFD_REC_ACTION_NOP, mid);
+	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
 	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
+	if (err)
+		goto out;
+
+	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
+		err = -EBUSY;
+
+out:
 	kfree(sfd_pl);
 	return err;
 }


@@ -925,7 +925,7 @@ void phy_start(struct phy_device *phydev)
 		break;
 	case PHY_HALTED:
 		/* make sure interrupts are re-enabled for the PHY */
-		if (phydev->irq != PHY_POLL) {
+		if (phy_interrupt_is_valid(phydev)) {
 			err = phy_enable_interrupts(phydev);
 			if (err < 0)
 				break;


@@ -3157,6 +3157,15 @@ ppp_connect_channel(struct channel *pch, int unit)
 		goto outl;
 
 	ppp_lock(ppp);
+	spin_lock_bh(&pch->downl);
+	if (!pch->chan) {
+		/* Don't connect unregistered channels */
+		spin_unlock_bh(&pch->downl);
+		ppp_unlock(ppp);
+		ret = -ENOTCONN;
+		goto outl;
+	}
+	spin_unlock_bh(&pch->downl);
 	if (pch->file.hdrlen > ppp->file.hdrlen)
 		ppp->file.hdrlen = pch->file.hdrlen;
 	hdrlen = pch->file.hdrlen + 2;	/* for protocol bytes */


@@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg)
 			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
 				     0, NULL);
 			proto->restart_counter--;
-		} else
+		} else if (netif_carrier_ok(proto->dev))
+			ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
+				     0, NULL);
+		else
 			ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
 				     0, NULL);
 		break;


@@ -526,10 +526,14 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
 
 	/*
 	 * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
-	 * hierarchies.
+	 * hierarchies.  Note that some PCIe host implementations omit
+	 * the root ports entirely, in which case a downstream port on
+	 * a switch may become the root of the link state chain for all
+	 * its subordinate endpoints.
 	 */
 	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
-	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+	    pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE ||
+	    !pdev->bus->parent->self) {
 		link->root = link;
 	} else {
 		struct pcie_link_state *parent;


@@ -592,6 +592,11 @@ struct qeth_cmd_buffer {
 	void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *);
 };
 
+static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob)
+{
+	return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
+}
+
 /**
  * definition of a qeth channel, used for read and write
  */
@@ -849,7 +854,7 @@ struct qeth_trap_id {
 */
 static inline int qeth_get_elements_for_range(addr_t start, addr_t end)
 {
-	return PFN_UP(end - 1) - PFN_DOWN(start);
+	return PFN_UP(end) - PFN_DOWN(start);
 }
 
 static inline int qeth_get_micros(void)
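The PFN_UP(end - 1) → PFN_UP(end) change matters exactly when the last data byte is the first byte of a page; there the old formula dropped one buffer element. A worked example, assuming 4 KiB pages and an exclusive 'end':

    #include <assert.h>

    #define PAGE_SIZE 4096UL
    #define PFN_DOWN(x) ((x) / PAGE_SIZE)
    #define PFN_UP(x)   (((x) + PAGE_SIZE - 1) / PAGE_SIZE)

    int main(void)
    {
            /* 5 bytes at 0x1ffc..0x2000 span two pages */
            unsigned long start = 0x1ffc, end = 0x2001;

            assert(PFN_UP(end - 1) - PFN_DOWN(start) == 1); /* old: undercount */
            assert(PFN_UP(end) - PFN_DOWN(start) == 2);     /* fixed */
            return 0;
    }

The companion "fix overestimated count" hunk adds the start != end guard in qeth_get_elements_no() below, since with the new formula an empty range off a page boundary would otherwise count one element.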


@@ -2050,7 +2050,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 	unsigned long flags;
 	struct qeth_reply *reply = NULL;
 	unsigned long timeout, event_timeout;
-	struct qeth_ipa_cmd *cmd;
+	struct qeth_ipa_cmd *cmd = NULL;
 
 	QETH_CARD_TEXT(card, 2, "sendctl");
@@ -2064,23 +2064,27 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 	}
 	reply->callback = reply_cb;
 	reply->param = reply_param;
-	if (card->state == CARD_STATE_DOWN)
-		reply->seqno = QETH_IDX_COMMAND_SEQNO;
-	else
-		reply->seqno = card->seqno.ipa++;
+
 	init_waitqueue_head(&reply->wait_q);
-	spin_lock_irqsave(&card->lock, flags);
-	list_add_tail(&reply->list, &card->cmd_waiter_list);
-	spin_unlock_irqrestore(&card->lock, flags);
 	QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN);
 
 	while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ;
+
+	if (IS_IPA(iob->data)) {
+		cmd = __ipa_cmd(iob);
+		cmd->hdr.seqno = card->seqno.ipa++;
+		reply->seqno = cmd->hdr.seqno;
+		event_timeout = QETH_IPA_TIMEOUT;
+	} else {
+		reply->seqno = QETH_IDX_COMMAND_SEQNO;
+		event_timeout = QETH_TIMEOUT;
+	}
 	qeth_prepare_control_data(card, len, iob);
 
-	if (IS_IPA(iob->data))
-		event_timeout = QETH_IPA_TIMEOUT;
-	else
-		event_timeout = QETH_TIMEOUT;
+	spin_lock_irqsave(&card->lock, flags);
+	list_add_tail(&reply->list, &card->cmd_waiter_list);
+	spin_unlock_irqrestore(&card->lock, flags);
+
 	timeout = jiffies + event_timeout;
 
 	QETH_CARD_TEXT(card, 6, "noirqpnd");
@@ -2105,9 +2109,8 @@ int qeth_send_control_data(struct qeth_card *card, int len,
 
 	/* we have only one long running ipassist, since we can ensure
 	   process context of this command we can sleep */
-	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-	if ((cmd->hdr.command == IPA_CMD_SETIP) &&
-	    (cmd->hdr.prot_version == QETH_PROT_IPV4)) {
+	if (cmd && cmd->hdr.command == IPA_CMD_SETIP &&
+	    cmd->hdr.prot_version == QETH_PROT_IPV4) {
 		if (!wait_event_timeout(reply->wait_q,
 					atomic_read(&reply->received), event_timeout))
 			goto time_err;
@@ -2871,7 +2874,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card,
 	memset(cmd, 0, sizeof(struct qeth_ipa_cmd));
 	cmd->hdr.command = command;
 	cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST;
-	cmd->hdr.seqno = card->seqno.ipa;
+	/* cmd->hdr.seqno is set by qeth_send_control_data() */
 	cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type);
 	cmd->hdr.rel_adapter_no = (__u8) card->info.portno;
 	if (card->options.layer2)
@@ -3852,10 +3855,12 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags);
 int qeth_get_elements_no(struct qeth_card *card,
 		     struct sk_buff *skb, int extra_elems, int data_offset)
 {
-	int elements = qeth_get_elements_for_range(
-				(addr_t)skb->data + data_offset,
-				(addr_t)skb->data + skb_headlen(skb)) +
-			qeth_get_elements_for_frags(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	int elements = qeth_get_elements_for_frags(skb);
+	addr_t start = (addr_t)skb->data + data_offset;
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);
 
 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2, "Invalid size of IP packet "
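The seqno race fixed above: sequence numbers used to be allocated when the reply struct was set up, before the caller owned the write channel, so two submitters could transmit in the opposite order of their seqnos and confuse reply matching. A compressed user-space sketch of the fixed ordering (hypothetical names; C11 atomics stand in for the driver's irq_pending cmpxchg):

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_flag channel_busy = ATOMIC_FLAG_INIT;
    static uint32_t next_seqno;

    static uint32_t submit_cmd(void)
    {
            uint32_t seqno;

            while (atomic_flag_test_and_set(&channel_busy))
                    ;                       /* wait for the write channel */
            seqno = next_seqno++;           /* now seqno order == wire order */
            /* ...build and transmit the command... */
            atomic_flag_clear(&channel_busy);
            return seqno;
    }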


@@ -39,8 +39,40 @@ struct qeth_ipaddr {
 			unsigned int pfxlen;
 		} a6;
 	} u;
 };
 
+static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1,
+					 struct qeth_ipaddr *a2)
+{
+	if (a1->proto != a2->proto)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr);
+	return a1->u.a4.addr == a2->u.a4.addr;
+}
+
+static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1,
+					  struct qeth_ipaddr *a2)
+{
+	/* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(),
+	 * so 'proto' and 'addr' match for sure.
+	 *
+	 * For ucast:
+	 * - 'mac' is always 0.
+	 * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching
+	 *   values are required to avoid mixups in takeover eligibility.
+	 *
+	 * For mcast,
+	 * - 'mac' is mapped from the IP, and thus always matches.
+	 * - 'mask'/'pfxlen' is always 0.
+	 */
+	if (a1->type != a2->type)
+		return false;
+	if (a1->proto == QETH_PROT_IPV6)
+		return a1->u.a6.pfxlen == a2->u.a6.pfxlen;
+	return a1->u.a4.mask == a2->u.a4.mask;
+}
+
 static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr)
 {
 	u64 ret = 0;


@ -154,6 +154,24 @@ int qeth_l3_string_to_ipaddr(const char *buf, enum qeth_prot_versions proto,
return -EINVAL; return -EINVAL;
} }
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
struct qeth_ipaddr *query)
{
u64 key = qeth_l3_ipaddr_hash(query);
struct qeth_ipaddr *addr;
if (query->is_multicast) {
hash_for_each_possible(card->ip_mc_htable, addr, hnode, key)
if (qeth_l3_addr_match_ip(addr, query))
return addr;
} else {
hash_for_each_possible(card->ip_htable, addr, hnode, key)
if (qeth_l3_addr_match_ip(addr, query))
return addr;
}
return NULL;
}
static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
{ {
int i, j; int i, j;
@ -207,34 +225,6 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
return rc; return rc;
} }
inline int
qeth_l3_ipaddrs_is_equal(struct qeth_ipaddr *addr1, struct qeth_ipaddr *addr2)
{
return addr1->proto == addr2->proto &&
!memcmp(&addr1->u, &addr2->u, sizeof(addr1->u)) &&
!memcmp(&addr1->mac, &addr2->mac, sizeof(addr1->mac));
}
static struct qeth_ipaddr *
qeth_l3_ip_from_hash(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
struct qeth_ipaddr *addr;
if (tmp_addr->is_multicast) {
hash_for_each_possible(card->ip_mc_htable, addr,
hnode, qeth_l3_ipaddr_hash(tmp_addr))
if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
return addr;
} else {
hash_for_each_possible(card->ip_htable, addr,
hnode, qeth_l3_ipaddr_hash(tmp_addr))
if (qeth_l3_ipaddrs_is_equal(tmp_addr, addr))
return addr;
}
return NULL;
}
int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{ {
int rc = 0; int rc = 0;
@@ -249,8 +239,8 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}
 
-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
-	if (!addr)
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr))
 		return -ENOENT;
 
 	addr->ref_counter--;
@@ -259,11 +249,7 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 	if (addr->in_progress)
 		return -EINPROGRESS;
 
-	if (!qeth_card_hw_is_reachable(card)) {
-		addr->disp_flag = QETH_DISP_ADDR_DELETE;
-		return 0;
-	}
-
-	rc = qeth_l3_deregister_addr_entry(card, addr);
+	if (qeth_card_hw_is_reachable(card))
+		rc = qeth_l3_deregister_addr_entry(card, addr);
 
 	hash_del(&addr->hnode);
@@ -276,6 +262,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 {
 	int rc = 0;
 	struct qeth_ipaddr *addr;
+	char buf[40];
 
 	QETH_CARD_TEXT(card, 4, "addip");
@@ -286,8 +273,20 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8);
 	}
 
-	addr = qeth_l3_ip_from_hash(card, tmp_addr);
-	if (!addr) {
+	addr = qeth_l3_find_addr_by_ip(card, tmp_addr);
+	if (addr) {
+		if (tmp_addr->type != QETH_IP_TYPE_NORMAL)
+			return -EADDRINUSE;
+		if (qeth_l3_addr_match_all(addr, tmp_addr)) {
+			addr->ref_counter++;
+			return 0;
+		}
+		qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u,
+					 buf);
+		dev_warn(&card->gdev->dev,
+			 "Registering IP address %s failed\n", buf);
+		return -EADDRINUSE;
+	} else {
 		addr = qeth_l3_get_addr_buffer(tmp_addr->proto);
 		if (!addr)
 			return -ENOMEM;
@@ -327,18 +326,15 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
 		    (rc == IPA_RC_LAN_OFFLINE)) {
 			addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			if (addr->ref_counter < 1) {
-				qeth_l3_delete_ip(card, addr);
+				qeth_l3_deregister_addr_entry(card, addr);
+				hash_del(&addr->hnode);
 				kfree(addr);
 			}
 		} else {
 			hash_del(&addr->hnode);
 			kfree(addr);
 		}
-	} else {
-		if (addr->type == QETH_IP_TYPE_NORMAL)
-			addr->ref_counter++;
 	}
 	return rc;
 }
@@ -406,11 +402,7 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
 	spin_lock_bh(&card->ip_lock);
 
 	hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
-		if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
-			qeth_l3_deregister_addr_entry(card, addr);
-			hash_del(&addr->hnode);
-			kfree(addr);
-		} else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
+		if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
 			if (addr->proto == QETH_PROT_IPV4) {
 				addr->in_progress = 1;
 				spin_unlock_bh(&card->ip_lock);
@@ -726,12 +718,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
@@ -794,12 +781,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
 		return -ENOMEM;
 
 	spin_lock_bh(&card->ip_lock);
-
-	if (qeth_l3_ip_from_hash(card, ipaddr))
-		rc = -EEXIST;
-	else
-		qeth_l3_add_ip(card, ipaddr);
-
+	rc = qeth_l3_add_ip(card, ipaddr);
 	spin_unlock_bh(&card->ip_lock);
 
 	kfree(ipaddr);
@@ -1444,8 +1426,9 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev)
 		memcpy(tmp->mac, buf, sizeof(tmp->mac));
 		tmp->is_multicast = 1;
 
-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 		} else {
 			ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
@@ -1528,8 +1511,9 @@ qeth_l3_add_mc6_to_hash(struct qeth_card *card, struct inet6_dev *in6_dev)
 		       sizeof(struct in6_addr));
 		tmp->is_multicast = 1;
 
-		ipm = qeth_l3_ip_from_hash(card, tmp);
+		ipm = qeth_l3_find_addr_by_ip(card, tmp);
 		if (ipm) {
+			/* for mcast, by-IP match means full match */
 			ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
 			continue;
 		}
@@ -2784,11 +2768,12 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
 			struct sk_buff *skb, int extra_elems)
 {
-	addr_t tcpdptr = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
-	int elements = qeth_get_elements_for_range(
-				tcpdptr,
-				(addr_t)skb->data + skb_headlen(skb)) +
-				qeth_get_elements_for_frags(skb);
+	addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb);
+	addr_t end = (addr_t)skb->data + skb_headlen(skb);
+	int elements = qeth_get_elements_for_frags(skb);
+
+	if (start != end)
+		elements += qeth_get_elements_for_range(start, end);
 
 	if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) {
 		QETH_DBF_MESSAGE(2,

@@ -114,13 +114,17 @@ out:
 int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
 {
 	int ret;
+	umode_t old_mode = inode->i_mode;
 
 	if (type == ACL_TYPE_ACCESS && acl) {
 		ret = posix_acl_update_mode(inode, &inode->i_mode, &acl);
 		if (ret)
 			return ret;
 	}
-	return __btrfs_set_acl(NULL, inode, acl, type);
+	ret = __btrfs_set_acl(NULL, inode, acl, type);
+	if (ret)
+		inode->i_mode = old_mode;
+	return ret;
 }
 
 /*

@@ -3072,7 +3072,7 @@ static inline bool vma_is_fsdax(struct vm_area_struct *vma)
 	if (!vma_is_dax(vma))
 		return false;
 	inode = file_inode(vma->vm_file);
-	if (inode->i_mode == S_IFCHR)
+	if (S_ISCHR(inode->i_mode))
 		return false; /* device-dax */
 	return true;
 }
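Aside: the bug here is a classic mode-bits mixup. i_mode carries both the file-type bits and the permission bits, so comparing the whole value against the type constant S_IFCHR can never match a real inode. A tiny illustration using the same macros (userspace headers for brevity):

    #include <sys/stat.h>
    #include <stdbool.h>

    static bool is_chardev_wrong(mode_t mode)
    {
            return mode == S_IFCHR;   /* fails for S_IFCHR | 0600 */
    }

    static bool is_chardev_right(mode_t mode)
    {
            return S_ISCHR(mode);     /* (mode & S_IFMT) == S_IFCHR */
    }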

@@ -72,7 +72,6 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
 	BUILD_BUG_ON(sizeof(_i) > sizeof(long));			\
 	BUILD_BUG_ON(sizeof(_s) > sizeof(long));			\
 									\
-	_i &= _mask;							\
-	_i;								\
+	(typeof(_i)) (_i & _mask);					\
 })
 #endif /* _LINUX_NOSPEC_H */
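Aside: the macro now evaluates to a value instead of assigning through its argument, which is what allows a const-qualified index. Conceptually, a speculation-safe bounds check looks like this (a sketch using the real array_index_nospec() helper):

    #include <linux/nospec.h>

    static int lookup(const int *table, unsigned long size, unsigned long index)
    {
            if (index >= size)
                    return -1;
            /* Clamp the value the CPU may use speculatively: even if a
             * mispredicted branch runs the load below, the masked index
             * stays within [0, size). */
            index = array_index_nospec(index, size);
            return table[index];
    }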

@@ -62,6 +62,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
 		UDP_SKB_CB(skb)->cscov = cscov;
 		if (skb->ip_summed == CHECKSUM_COMPLETE)
 			skb->ip_summed = CHECKSUM_NONE;
+		skb->csum_valid = 0;
 	}
 
 	return 0;

@@ -23,8 +23,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
 {
 	int i;
 
-	for (i = 0; i < array->map.max_entries; i++)
+	for (i = 0; i < array->map.max_entries; i++) {
 		free_percpu(array->pptrs[i]);
+		cond_resched();
+	}
 }
 
 static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -40,6 +42,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
 			return -ENOMEM;
 		}
 		array->pptrs[i] = ptr;
+		cond_resched();
 	}
 
 	return 0;
@@ -51,8 +54,9 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
+	int ret;
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -96,8 +100,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 		array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size);
@@ -111,20 +126,16 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array->map.key_size = attr->key_size;
 	array->map.value_size = attr->value_size;
 	array->map.max_entries = attr->max_entries;
+	array->map.map_flags = attr->map_flags;
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
+	if (percpu &&
+	    (elem_size > PCPU_MIN_UNIT_SIZE ||
+	     bpf_array_alloc_percpu(array))) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
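Aside: the fix charges the full map cost, including the per-CPU area, against RLIMIT_MEMLOCK before anything is allocated, and does the arithmetic in u64 so the per-CPU multiplication cannot wrap a 32-bit size. The shape of the pattern, with hypothetical names (the 32-bit inputs come from untrusted attributes):

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Hypothetical cost model: base array plus one copy per possible CPU.
     * nentries and elem_size originate from u32 attributes, so the u64
     * products below cannot overflow. */
    static int precharge_map(u64 nentries, u64 elem_size, u64 ncpus,
                             int (*charge_pages)(u32 pages))
    {
            u64 cost = nentries * elem_size;

            if (cost >= U32_MAX - PAGE_SIZE)
                    return -ENOMEM;
            cost += nentries * elem_size * ncpus;
            if (cost >= U32_MAX - PAGE_SIZE)
                    return -ENOMEM;

            /* Charge in pages, rounded up, before allocating anything. */
            return charge_pages(round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
    }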

@@ -91,6 +91,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	smap->map.key_size = attr->key_size;
 	smap->map.value_size = value_size;
 	smap->map.max_entries = attr->max_entries;
+	smap->map.map_flags = attr->map_flags;
 	smap->n_buckets = n_buckets;
 	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

@@ -1884,6 +1884,12 @@ int timers_dead_cpu(unsigned int cpu)
 		spin_lock_irq(&new_base->lock);
 		spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
+		/*
+		 * The current CPU's base clock might be stale. Update it
+		 * before moving the timers over.
+		 */
+		forward_timer_base(new_base);
+
 		BUG_ON(old_base->running_timer);
 
 		for (i = 0; i < WHEEL_SIZE; i++)

@@ -230,6 +230,9 @@ static ssize_t brport_show(struct kobject *kobj,
 	struct brport_attribute *brport_attr = to_brport_attr(attr);
 	struct net_bridge_port *p = to_brport(kobj);
 
+	if (!brport_attr->show)
+		return -EINVAL;
+
 	return brport_attr->show(p, buf);
 }

@@ -2199,8 +2199,11 @@ EXPORT_SYMBOL(netif_set_xps_queue);
  */
 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 {
+	bool disabling;
 	int rc;
 
+	disabling = txq < dev->real_num_tx_queues;
+
 	if (txq < 1 || txq > dev->num_tx_queues)
 		return -EINVAL;
 
@@ -2216,15 +2219,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 		if (dev->num_tc)
 			netif_setup_tc(dev, txq);
 
-		if (txq < dev->real_num_tx_queues) {
+		dev->real_num_tx_queues = txq;
+
+		if (disabling) {
+			synchronize_net();
 			qdisc_reset_all_tx_gt(dev, txq);
 #ifdef CONFIG_XPS
 			netif_reset_xps_queues_gt(dev, txq);
 #endif
 		}
+	} else {
+		dev->real_num_tx_queues = txq;
 	}
 
-	dev->real_num_tx_queues = txq;
 	return 0;
 }
 EXPORT_SYMBOL(netif_set_real_num_tx_queues);

@@ -640,6 +640,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
 			    fi->fib_nh, cfg))
 				return 1;
 		}
+#ifdef CONFIG_IP_ROUTE_CLASSID
+		if (cfg->fc_flow &&
+		    cfg->fc_flow != fi->fib_nh->nh_tclassid)
+			return 1;
+#endif
 		if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
 		    (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
 			return 0;

@@ -126,10 +126,13 @@ static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
 static int ip_rt_error_cost __read_mostly	= HZ;
 static int ip_rt_error_burst __read_mostly	= 5 * HZ;
 static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
-static int ip_rt_min_pmtu __read_mostly	= 512 + 20 + 20;
+static u32 ip_rt_min_pmtu __read_mostly	= 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly	= 256;
 
 static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
+
+static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;
+
 /*
  *	Interface to generic destination cache.
  */
@@ -2790,7 +2793,8 @@ static struct ctl_table ipv4_route_table[] = {
 		.data		= &ip_rt_min_pmtu,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &ip_min_valid_pmtu,
 	},
 	{
 		.procname	= "min_adv_mss",
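Aside: proc_dointvec_minmax rejects writes outside [extra1, extra2]; leaving extra2 unset leaves the range open above, so this change only enforces the 68-byte floor (IPV4_MIN_MTU). A minimal sysctl entry of the same shape, with a hypothetical knob (registration via register_net_sysctl() or similar is omitted):

    #include <linux/sysctl.h>

    static int my_knob = 100;
    static int my_knob_min = 68;    /* lower bound only; no upper bound */

    static struct ctl_table my_table[] = {
            {
                    .procname       = "my_knob",
                    .data           = &my_knob,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = proc_dointvec_minmax,
                    .extra1         = &my_knob_min,
            },
            { }
    };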

@@ -1580,7 +1580,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
 	 */
 	segs = max_t(u32, bytes / mss_now, min_tso_segs);
 
-	return min_t(u32, segs, sk->sk_gso_max_segs);
+	return segs;
 }
 EXPORT_SYMBOL(tcp_tso_autosize);
 
@@ -1592,8 +1592,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
 	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
 	u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
 
-	return tso_segs ? :
-		tcp_tso_autosize(sk, mss_now, sysctl_tcp_min_tso_segs);
+	if (!tso_segs)
+		tso_segs = tcp_tso_autosize(sk, mss_now,
+					    sysctl_tcp_min_tso_segs);
+	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
 }
 
 /* Returns the portion of skb which can be sent right away */
@@ -1907,6 +1909,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
 	}
 }
 
+static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
+{
+	struct sk_buff *skb, *next;
+
+	skb = tcp_send_head(sk);
+	tcp_for_write_queue_from_safe(skb, next, sk) {
+		if (len <= skb->len)
+			break;
+
+		if (unlikely(TCP_SKB_CB(skb)->eor))
+			return false;
+
+		len -= skb->len;
+	}
+
+	return true;
+}
+
 /* Create a new MTU probe if we are ready.
  * MTU probe is regularly attempting to increase the path MTU by
  * deliberately sending larger packets. This discovers routing
@@ -1979,6 +1999,9 @@ static int tcp_mtu_probe(struct sock *sk)
 		return 0;
 	}
 
+	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
+		return -1;
+
 	/* We're allowed to probe. Build it now. */
 	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
 	if (!nskb)
@@ -2014,6 +2037,10 @@ static int tcp_mtu_probe(struct sock *sk)
 			/* We've eaten all the data from this skb.
 			 * Throw it away. */
 			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
+			/* If this is the last SKB we copy and eor is set
+			 * we need to propagate it to the new skb.
+			 */
+			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
 			tcp_unlink_write_queue(skb, sk);
 			sk_wmem_free_skb(sk, skb);
 		} else {

@@ -1714,6 +1714,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
 		err = udplite_checksum_init(skb, uh);
 		if (err)
 			return err;
+
+		if (UDP_SKB_CB(skb)->partial_cov) {
+			skb->csum = inet_compute_pseudo(skb, proto);
+			return 0;
+		}
 	}
 
 	/* Note, we are only interested in != 0 or == 0, thus the

@@ -72,6 +72,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
 		err = udplite_checksum_init(skb, uh);
 		if (err)
 			return err;
+
+		if (UDP_SKB_CB(skb)->partial_cov) {
+			skb->csum = ip6_compute_pseudo(skb, proto);
+			return 0;
+		}
 	}
 
 	/* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
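Aside: with UDP-Lite partial coverage only the first cscov bytes are protected, so the generic full-length verification paths below must be bypassed; the fix seeds skb->csum with just the pseudo-header sum and lets the partial-coverage code finish the job. For the IPv4 side, that seed amounts to the following (a sketch; inet_compute_pseudo() wraps the same primitive):

    #include <linux/ip.h>
    #include <net/checksum.h>

    /* Pseudo-header sum for a received IPv4 datagram: source address,
     * destination address, full length and protocol, folded into the
     * running checksum seed. */
    static __wsum pseudo_hdr_sum(const struct sk_buff *skb, u8 proto)
    {
            return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                                      skb->len, proto, 0);
    }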

@@ -176,7 +176,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
 #ifdef CONFIG_IPV6_SIT_6RD
 	struct ip_tunnel *t = netdev_priv(dev);
 
-	if (t->dev == sitn->fb_tunnel_dev) {
+	if (dev == sitn->fb_tunnel_dev) {
 		ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
 		t->ip6rd.relay_prefix = 0;
 		t->ip6rd.prefixlen = 16;

@@ -7,6 +7,7 @@
 #include <linux/if_arp.h>
 #include <linux/ipv6.h>
 #include <linux/mpls.h>
+#include <linux/nospec.h>
 #include <linux/vmalloc.h>
 #include <net/ip.h>
 #include <net/dst.h>
@@ -756,6 +757,22 @@ errout:
 	return err;
 }
 
+static bool mpls_label_ok(struct net *net, unsigned int *index)
+{
+	bool is_ok = true;
+
+	/* Reserved labels may not be set */
+	if (*index < MPLS_LABEL_FIRST_UNRESERVED)
+		is_ok = false;
+
+	/* The full 20 bit range may not be supported. */
+	if (is_ok && *index >= net->mpls.platform_labels)
+		is_ok = false;
+
+	*index = array_index_nospec(*index, net->mpls.platform_labels);
+	return is_ok;
+}
+
 static int mpls_route_add(struct mpls_route_config *cfg)
 {
 	struct mpls_route __rcu **platform_label;
@@ -774,12 +791,7 @@ static int mpls_route_add(struct mpls_route_config *cfg)
 		index = find_free_label(net);
 	}
 
-	/* Reserved labels may not be set */
-	if (index < MPLS_LABEL_FIRST_UNRESERVED)
-		goto errout;
-
-	/* The full 20 bit range may not be supported. */
-	if (index >= net->mpls.platform_labels)
+	if (!mpls_label_ok(net, &index))
 		goto errout;
 
 	/* Append makes no sense with mpls */
@@ -840,12 +852,7 @@ static int mpls_route_del(struct mpls_route_config *cfg)
 
 	index = cfg->rc_label;
 
-	/* Reserved labels may not be removed */
-	if (index < MPLS_LABEL_FIRST_UNRESERVED)
-		goto errout;
-
-	/* The full 20 bit range may not be supported */
-	if (index >= net->mpls.platform_labels)
+	if (!mpls_label_ok(net, &index))
 		goto errout;
 
 	mpls_route_update(net, index, NULL, &cfg->rc_nlinfo);
@@ -1279,10 +1286,9 @@ static int rtm_to_route_config(struct sk_buff *skb, struct nlmsghdr *nlh,
 					   &cfg->rc_label))
 				goto errout;
 
-			/* Reserved labels may not be set */
-			if (cfg->rc_label < MPLS_LABEL_FIRST_UNRESERVED)
+			if (!mpls_label_ok(cfg->rc_nlinfo.nl_net,
+					   &cfg->rc_label))
 				goto errout;
-
 			break;
 		}
 		case RTA_VIA:

@@ -2258,7 +2258,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 	if (cb->start) {
 		ret = cb->start(cb);
 		if (ret)
-			goto error_unlock;
+			goto error_put;
 	}
 
 	nlk->cb_running = true;
@@ -2278,6 +2278,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 	 */
 	return -EINTR;
 
+error_put:
+	module_put(control->module);
 error_unlock:
 	sock_put(sk);
 	mutex_unlock(nlk->cb_mutex);
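Aside: the bug was an unwind ladder with a missing rung; the module reference taken earlier was never dropped on the cb->start() failure path. The idiom is to give every acquired resource its own label, so each failure exit releases exactly what was taken before it. A sketch with hypothetical steps:

    int acquire_a(void);    /* hypothetical */
    int acquire_b(void);    /* hypothetical */
    int acquire_c(void);    /* hypothetical */
    void release_a(void);   /* hypothetical */
    void release_b(void);   /* hypothetical */

    static int setup(void)
    {
            int err;

            err = acquire_a();
            if (err)
                    return err;       /* nothing else held yet */
            err = acquire_b();
            if (err)
                    goto undo_a;      /* only A held */
            err = acquire_c();
            if (err)
                    goto undo_b;      /* A and B held */
            return 0;

    undo_b:
            release_b();
    undo_a:
            release_a();
            return err;
    }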

@@ -1103,6 +1103,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
 {
 	struct sk_buff *tmp;
 	struct net *net, *prev = NULL;
+	bool delivered = false;
 	int err;
 
 	for_each_net_rcu(net) {
@@ -1114,14 +1115,21 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
 			}
 			err = nlmsg_multicast(prev->genl_sock, tmp,
 					      portid, group, flags);
-			if (err)
+			if (!err)
+				delivered = true;
+			else if (err != -ESRCH)
 				goto error;
 		}
 
 		prev = net;
 	}
 
-	return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+	err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+	if (!err)
+		delivered = true;
+	else if (err != -ESRCH)
+		goto error;
+
+	return delivered ? 0 : -ESRCH;
 error:
 	kfree_skb(skb);
 	return err;

@@ -391,7 +391,7 @@ send_fragmentable:
 					     (char *)&opt, sizeof(opt));
 		if (ret == 0) {
 			ret = kernel_sendmsg(conn->params.local->socket, &msg,
-					     iov, 1, iov[0].iov_len);
+					     iov, 2, len);
 
 			opt = IPV6_PMTUDISC_DO;
 			kernel_setsockopt(conn->params.local->socket,

@@ -324,8 +324,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
 		bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
 
-		if (!IS_ERR(bdst) &&
-		    ipv6_chk_addr(dev_net(bdst->dev),
+		if (IS_ERR(bdst))
+			continue;
+
+		if (ipv6_chk_addr(dev_net(bdst->dev),
 				  &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
 			if (!IS_ERR_OR_NULL(dst))
 				dst_release(dst);
@@ -334,8 +336,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		}
 
 		bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
-		if (matchlen > bmatchlen)
+		if (matchlen > bmatchlen) {
+			dst_release(bdst);
 			continue;
+		}
 
 		if (!IS_ERR_OR_NULL(dst))
 			dst_release(dst);

@@ -510,21 +510,19 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		if (IS_ERR(rt))
 			continue;
 
-		if (!dst)
-			dst = &rt->dst;
-
 		/* Ensure the src address belongs to the output
 		 * interface.
 		 */
 		odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
 				     false);
 		if (!odev || odev->ifindex != fl4->flowi4_oif) {
-			if (&rt->dst != dst)
+			if (!dst)
+				dst = &rt->dst;
+			else
 				dst_release(&rt->dst);
 			continue;
 		}
 
+		if (dst != &rt->dst)
 			dst_release(dst);
 		dst = &rt->dst;
 		break;
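Aside: both sctp leak fixes enforce the same ownership rule: in a loop that repeatedly looks up routes, every route obtained but not kept must be released on that same iteration, and any previously cached candidate must be released before being replaced. Schematically, with hypothetical helpers (declared but not defined here):

    #include <stddef.h>

    struct route { int score; };

    struct route *lookup_route(int candidate);  /* hypothetical: takes a ref */
    void route_put(struct route *rt);           /* hypothetical: drops a ref */

    static struct route *pick_best(int ncandidates)
    {
            struct route *best = NULL;

            for (int c = 0; c < ncandidates; c++) {
                    struct route *rt = lookup_route(c);

                    if (!rt)
                            continue;           /* nothing taken, nothing to drop */
                    if (best && best->score >= rt->score) {
                            route_put(rt);      /* drop the ref we just took */
                            continue;
                    }
                    if (best)
                            route_put(best);    /* release the loser before replacing */
                    best = rt;
            }
            return best;                        /* caller owns exactly one ref */
    }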

@@ -1373,9 +1373,14 @@ static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
 	sctp_chunkhdr_t *chunk_hdr;
 	struct sk_buff *skb;
 	struct sock *sk;
+	int chunklen;
+
+	chunklen = SCTP_PAD4(sizeof(*chunk_hdr) + paylen);
+	if (chunklen > SCTP_MAX_CHUNK_LEN)
+		goto nodata;
 
 	/* No need to allocate LL here, as this is only a chunk. */
-	skb = alloc_skb(SCTP_PAD4(sizeof(sctp_chunkhdr_t) + paylen), gfp);
+	skb = alloc_skb(chunklen, gfp);
 	if (!skb)
 		goto nodata;

@@ -180,7 +180,7 @@ static const struct kernel_param_ops param_ops_xint = {
 };
 #define param_check_xint param_check_int
 
-static int power_save = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+static int power_save = -1;
 module_param(power_save, xint, 0644);
 MODULE_PARM_DESC(power_save, "Automatic power-saving timeout "
 		 "(in second, 0 = disable).");
@@ -2042,6 +2042,24 @@ out_free:
 	return err;
 }
 
+#ifdef CONFIG_PM
+/* On some boards setting power_save to a non 0 value leads to clicking /
+ * popping sounds whenever we enter/leave powersaving mode. Ideally we would
+ * figure out how to avoid these sounds, but that is not always feasible.
+ * So we keep a list of devices where we disable powersaving, as it's known
+ * to cause problems on these devices.
+ */
+static struct snd_pci_quirk power_save_blacklist[] = {
+	/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+	SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+	/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+	SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+	/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
+	SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
+	{}
+};
+#endif /* CONFIG_PM */
+
 /* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
 static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] = {
 	[AZX_DRIVER_NVIDIA] = 8,
@@ -2054,6 +2072,7 @@ static int azx_probe_continue(struct azx *chip)
 	struct hdac_bus *bus = azx_bus(chip);
 	struct pci_dev *pci = chip->pci;
 	int dev = chip->dev_index;
+	int val;
 	int err;
 
 	hda->probe_continued = 1;
@@ -2129,7 +2148,22 @@ static int azx_probe_continue(struct azx *chip)
 
 	chip->running = 1;
 	azx_add_card_list(chip);
-	snd_hda_set_power_save(&chip->bus, power_save * 1000);
+
+	val = power_save;
+#ifdef CONFIG_PM
+	if (val == -1) {
+		const struct snd_pci_quirk *q;
+
+		val = CONFIG_SND_HDA_POWER_SAVE_DEFAULT;
+		q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist);
+		if (q && val) {
+			dev_info(chip->card->dev, "device %04x:%04x is on the power_save blacklist, forcing power_save to 0\n",
+				 q->subvendor, q->subdevice);
+			val = 0;
+		}
+	}
+#endif /* CONFIG_PM */
+	snd_hda_set_power_save(&chip->bus, val * 1000);
 	if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
 		pm_runtime_put_autosuspend(&pci->dev);

@@ -4480,13 +4480,14 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
 
 	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
 		spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+		snd_hda_apply_pincfgs(codec, pincfgs);
+	} else if (action == HDA_FIXUP_ACT_INIT) {
 		/* Enable DOCK device */
 		snd_hda_codec_write(codec, 0x17, 0,
 			    AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
 		/* Enable DOCK device */
 		snd_hda_codec_write(codec, 0x19, 0,
 			    AC_VERB_SET_CONFIG_DEFAULT_BYTES_3, 0);
-		snd_hda_apply_pincfgs(codec, pincfgs);
 	}
 }

@@ -3277,4 +3277,51 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
 	}
 },
 
+{
+	/*
+	 * Bowers & Wilkins PX headphones only support the 48 kHz sample rate
+	 * even though they advertise more. The capture interface doesn't work
+	 * even on Windows.
+	 */
+	USB_DEVICE(0x19b5, 0x0021),
+	.driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+		.ifnum = QUIRK_ANY_INTERFACE,
+		.type = QUIRK_COMPOSITE,
+		.data = (const struct snd_usb_audio_quirk[]) {
+			{
+				.ifnum = 0,
+				.type = QUIRK_AUDIO_STANDARD_MIXER,
+			},
+			/* Capture */
+			{
+				.ifnum = 1,
+				.type = QUIRK_IGNORE_INTERFACE,
+			},
+			/* Playback */
+			{
+				.ifnum = 2,
+				.type = QUIRK_AUDIO_FIXED_ENDPOINT,
+				.data = &(const struct audioformat) {
+					.formats = SNDRV_PCM_FMTBIT_S16_LE,
+					.channels = 2,
+					.iface = 2,
+					.altsetting = 1,
+					.altset_idx = 1,
+					.attributes = UAC_EP_CS_ATTR_FILL_MAX |
+						UAC_EP_CS_ATTR_SAMPLE_RATE,
+					.endpoint = 0x03,
+					.ep_attr = USB_ENDPOINT_XFER_ISOC,
+					.rates = SNDRV_PCM_RATE_48000,
+					.rate_min = 48000,
+					.rate_max = 48000,
+					.nr_rates = 1,
+					.rate_table = (unsigned int[]) {
+						48000
+					}
+				}
+			},
+		}
+	}
+},
+
 #undef USB_DEVICE_VENDOR_SPEC

@@ -976,8 +976,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		/* Check for overlaps */
 		r = -EEXIST;
 		kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) {
-			if ((slot->id >= KVM_USER_MEM_SLOTS) ||
-			    (slot->id == id))
+			if (slot->id == id)
 				continue;
 			if (!((base_gfn + npages <= slot->base_gfn) ||
 			      (base_gfn >= slot->base_gfn + slot->npages)))
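Aside: the overlap test kept by this hunk is the standard half-open interval check: two ranges overlap unless one ends at or before the start of the other. As a standalone predicate:

    #include <stdbool.h>
    #include <stdint.h>

    /* [a, a + alen) overlaps [b, b + blen) unless one range ends before
     * the other begins; this is the negated condition used above. */
    static bool gfn_ranges_overlap(uint64_t a, uint64_t alen,
                                   uint64_t b, uint64_t blen)
    {
            return !(a + alen <= b || b + blen <= a);
    }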