This is the 4.9.32 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAllBNNMACgkQONu9yGCS
 aT4rJA//XOtxp2nr01OqffSItCX0vq7vMofbjo7spI2LF3JyxKkx0hje0im82XoD
 cYhZHyeK9lCHRZt2O541xOp3ITconecRvlt6uq49zKNvlFAelnp0rRTOktluJK/n
 Xm7EP2KLHeHXv+rvCfzU0hDC9fvoZe82NIF7kddAWHX4D/K3C6Jw6FVc4/QPl2MG
 11R6pylPtMGObgPEmfDlohBitcC3KawXAhIcyTTAr3rcuO2Wm00H+uF0VOpyD9hZ
 S5A8ecuyZVQw3mIKAJ9vOwkVoln1E+/P/OltWEXElkBZONHpI4LB2IE7Nmwnx+pE
 5oO3afNZIYOejX+GQvK2Apc8VONDV2VZRoIstzl+uaKDNGFaSLIu8AQ/UQfmoqTE
 Pwrp/FxgDGjq6NOf/+MqNhiqS9433Xgmt4HT5xctzS0zFNGzLIokdjRH3x7n6cez
 2JvdtRvJfdNq31kh6xQE/8oXuZktRSw3GxC5K4Dw3NRh02/9KNAgEjNPEMNXziGw
 RhLhyXl8i5eGAu1gxruojzRyCvaMS6S8ZcAvSSoJze0kj8jfkZtQbBswvS1LF9EI
 FFemshd1sQxUeZiXGYPuUUfyDvxvs7KRIqAYux8lsIhSQTu16REqsZRrzidpDP+K
 xtH61YBWhwDHZKIoT+Jchmn8QCytI77MvFdGuTl+KRPV4OyyeJs=
 =NE4C
 -----END PGP SIGNATURE-----

Merge 4.9.32 into android-4.9

Changes in 4.9.32
	bnx2x: Fix Multi-Cos
	vxlan: eliminate cached dst leak
	ipv6: xfrm: Handle errors reported by xfrm6_find_1stfragopt()
	cxgb4: avoid enabling napi twice to the same queue
	tcp: disallow cwnd undo when switching congestion control
	vxlan: fix use-after-free on deletion
	ipv6: Fix leak in ipv6_gso_segment().
	net: ping: do not abuse udp_poll()
	net/ipv6: Fix CALIPSO causing GPF with datagram support
	net: ethoc: enable NAPI before poll may be scheduled
	net: stmmac: fix completely hung TX when using TSO
	net: bridge: start hello timer only if device is up
	sparc64: Add __multi3 for gcc 7.x and later.
	sparc64: mm: fix copy_tsb to correctly copy huge page TSBs
	sparc: Machine description indices can vary
	sparc64: reset mm cpumask after wrap
	sparc64: combine activate_mm and switch_mm
	sparc64: redefine first version
	sparc64: add per-cpu mm of secondary contexts
	sparc64: new context wrap
	sparc64: delete old wrap code
	arch/sparc: support NR_CPUS = 4096
	serial: ifx6x60: fix use-after-free on module unload
	ptrace: Properly initialize ptracer_cred on fork
	crypto: asymmetric_keys - handle EBUSY due to backlog correctly
	KEYS: fix dereferencing NULL payload with nonzero length
	KEYS: fix freeing uninitialized memory in key_update()
	KEYS: encrypted: avoid encrypting/decrypting stack buffers
	crypto: drbg - wait for crypto op not signal safe
	crypto: gcm - wait for crypto op not signal safe
	drm/amdgpu/ci: disable mclk switching for high refresh rates (v2)
	nfsd4: fix null dereference on replay
	nfsd: Fix up the "supattr_exclcreat" attributes
	efi: Don't issue error message when booted under Xen
	kvm: async_pf: fix rcu_irq_enter() with irqs enabled
	KVM: cpuid: Fix read/write out-of-bounds vulnerability in cpuid emulation
	arm64: KVM: Preserve RES1 bits in SCTLR_EL2
	arm64: KVM: Allow unaligned accesses at EL2
	arm: KVM: Allow unaligned accesses at HYP
	KVM: async_pf: avoid async pf injection when in guest mode
	KVM: arm/arm64: vgic-v3: Do not use Active+Pending state for a HW interrupt
	KVM: arm/arm64: vgic-v2: Do not use Active+Pending state for a HW interrupt
	dmaengine: usb-dmac: Fix DMAOR AE bit definition
	dmaengine: ep93xx: Always start from BASE0
	dmaengine: ep93xx: Don't drain the transfers in terminate_all()
	dmaengine: mv_xor_v2: handle mv_xor_v2_prep_sw_desc() error properly
	dmaengine: mv_xor_v2: properly handle wrapping in the array of HW descriptors
	dmaengine: mv_xor_v2: do not use descriptors not acked by async_tx
	dmaengine: mv_xor_v2: enable XOR engine after its configuration
	dmaengine: mv_xor_v2: fix tx_submit() implementation
	dmaengine: mv_xor_v2: remove interrupt coalescing
	dmaengine: mv_xor_v2: set DMA mask to 40 bits
	cfq-iosched: fix the delay of cfq_group's vdisktime under iops mode
	xen/privcmd: Support correctly 64KB page granularity when mapping memory
	ext4: fix SEEK_HOLE
	ext4: keep existing extra fields when inode expands
	ext4: fix data corruption with EXT4_GET_BLOCKS_ZERO
	ext4: fix fdatasync(2) after extent manipulation operations
	drm: Fix oops + Xserver hang when unplugging USB drm devices
	usb: gadget: f_mass_storage: Serialize wake and sleep execution
	usb: chipidea: udc: fix NULL pointer dereference if udc_start failed
	usb: chipidea: debug: check before accessing ci_role
	staging/lustre/lov: remove set_fs() call from lov_getstripe()
	iio: adc: bcm_iproc_adc: swap primary and secondary isr handler's
	iio: light: ltr501 Fix interchanged als/ps register field
	iio: proximity: as3935: fix AS3935_INT mask
	iio: proximity: as3935: fix iio_trigger_poll issue
	mei: make sysfs modalias format similar as uevent modalias
	cpufreq: cpufreq_register_driver() should return -ENODEV if init fails
	target: Re-add check to reject control WRITEs with overflow data
	drm/msm: Expose our reservation object when exporting a dmabuf.
	ahci: Acer SA5-271 SSD Not Detected Fix
	cgroup: Prevent kill_css() from being called more than once
	Input: elantech - add Fujitsu Lifebook E546/E557 to force crc_enabled
	cpuset: consider dying css as offline
	fs: add i_blocksize()
	ufs: restore proper tail allocation
	fix ufs_isblockset()
	ufs: restore maintaining ->i_blocks
	ufs: set correct ->s_maxsize
	ufs_extend_tail(): fix the braino in calling conventions of ufs_new_fragments()
	ufs_getfrag_block(): we only grab ->truncate_mutex on block creation path
	cxl: Fix error path on bad ioctl
	cxl: Avoid double free_irq() for psl,slice interrupts
	btrfs: use correct types for page indices in btrfs_page_exists_in_range
	btrfs: fix memory leak in update_space_info failure path
	KVM: arm/arm64: Handle possible NULL stage2 pud when ageing pages
	scsi: qla2xxx: don't disable a not previously enabled PCI device
	scsi: qla2xxx: Modify T262 FW dump template to specify same start/end to debug customer issues
	scsi: qla2xxx: Set bit 15 for DIAG_ECHO_TEST MBC
	scsi: qla2xxx: Fix mailbox pointer error in fwdump capture
	powerpc/sysdev/simple_gpio: Fix oops in gpio save_regs function
	powerpc/numa: Fix percpu allocations to be NUMA aware
	powerpc/hotplug-mem: Fix missing endian conversion of aa_index
	powerpc/kernel: Fix FP and vector register restoration
	powerpc/kernel: Initialize load_tm on task creation
	perf/core: Drop kernel samples even though :u is specified
	drm/vmwgfx: Handle vmalloc() failure in vmw_local_fifo_reserve()
	drm/vmwgfx: limit the number of mip levels in vmw_gb_surface_define_ioctl()
	drm/vmwgfx: Make sure backup_handle is always valid
	drm/nouveau/tmr: fully separate alarm execution/pending lists
	ALSA: timer: Fix race between read and ioctl
	ALSA: timer: Fix missing queue indices reset at SNDRV_TIMER_IOCTL_SELECT
	ASoC: Fix use-after-free at card unregistration
	cpu/hotplug: Drop the device lock on error
	drivers: char: mem: Fix wraparound check to allow mappings up to the end
	serial: sh-sci: Fix panic when serial console and DMA are enabled
	arm64: traps: fix userspace cache maintenance emulation on a tagged pointer
	arm64: hw_breakpoint: fix watchpoint matching for tagged pointers
	arm64: entry: improve data abort handling of tagged pointers
	ARM: 8636/1: Cleanup sanity_check_meminfo
	ARM: 8637/1: Adjust memory boundaries after reservations
	usercopy: Adjust tests to deal with SMAP/PAN
	drm/i915/vbt: don't propagate errors from intel_bios_init()
	drm/i915/vbt: split out defaults that are set when there is no VBT
	cpufreq: schedutil: move cached_raw_freq to struct sugov_policy
	cpufreq: schedutil: Fix per-CPU structure initialization in sugov_start()
	netfilter: nft_set_rbtree: handle element re-addition after deletion
	Linux 4.9.32

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit da3493c028 by Greg Kroah-Hartman, 2017-06-14 16:42:56 +02:00
152 changed files with 1026 additions and 589 deletions

@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 31
+SUBLEVEL = 32
 EXTRAVERSION =
 NAME = Roaring Lionus

@@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup);
 extern void init_default_cache_policy(unsigned long);
 extern void paging_init(const struct machine_desc *desc);
 extern void early_paging_init(const struct machine_desc *);
-extern void sanity_check_meminfo(void);
+extern void adjust_lowmem_bounds(void);
 extern enum reboot_mode reboot_mode;
 extern void setup_dma_zone(const struct machine_desc *desc);
@@ -1093,8 +1093,14 @@ void __init setup_arch(char **cmdline_p)
 setup_dma_zone(mdesc);
 xen_early_init();
 efi_init();
-sanity_check_meminfo();
+/*
+ * Make sure the calculation for lowmem/highmem is set appropriately
+ * before reserving/allocating any mmeory
+ */
+adjust_lowmem_bounds();
 arm_memblock_init(mdesc);
+/* Memory may have been removed so recalculate the bounds. */
+adjust_lowmem_bounds();
 early_ioremap_reset();

@@ -95,7 +95,6 @@ __do_hyp_init:
 @ - Write permission implies XN: disabled
 @ - Instruction cache: enabled
 @ - Data/Unified cache: enabled
-@ - Memory alignment checks: enabled
 @ - MMU: enabled (this code must be run from an identity mapping)
 mrc p15, 4, r0, c1, c0, 0 @ HSCR
 ldr r2, =HSCTLR_MASK
@@ -103,8 +102,8 @@ __do_hyp_init:
 mrc p15, 0, r1, c1, c0, 0 @ SCTLR
 ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 and r1, r1, r2
-ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) )
-THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
+ARM( ldr r2, =(HSCTLR_M) )
+THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
 orr r1, r1, r2
 orr r0, r0, r1
 mcr p15, 4, r0, c1, c0, 0 @ HSCR

@@ -872,6 +872,9 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
 pmd_t *pmd;
 pud = stage2_get_pud(kvm, cache, addr);
+if (!pud)
+return NULL;
+
 if (stage2_pud_none(*pud)) {
 if (!cache)
 return NULL;

@@ -1152,13 +1152,12 @@ early_param("vmalloc", early_vmalloc);
 phys_addr_t arm_lowmem_limit __initdata = 0;
-void __init sanity_check_meminfo(void)
+void __init adjust_lowmem_bounds(void)
 {
 phys_addr_t memblock_limit = 0;
-int highmem = 0;
 u64 vmalloc_limit;
 struct memblock_region *reg;
-bool should_use_highmem = false;
+phys_addr_t lowmem_limit = 0;
 /*
 * Let's use our own (unoptimized) equivalent of __pa() that is
@@ -1172,43 +1171,18 @@ void __init sanity_check_meminfo(void)
 for_each_memblock(memory, reg) {
 phys_addr_t block_start = reg->base;
 phys_addr_t block_end = reg->base + reg->size;
-phys_addr_t size_limit = reg->size;
-if (reg->base >= vmalloc_limit)
-highmem = 1;
-else
-size_limit = vmalloc_limit - reg->base;
-if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
-if (highmem) {
-pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
-&block_start, &block_end);
-memblock_remove(reg->base, reg->size);
-should_use_highmem = true;
-continue;
-}
-if (reg->size > size_limit) {
-phys_addr_t overlap_size = reg->size - size_limit;
-pr_notice("Truncating RAM at %pa-%pa",
-&block_start, &block_end);
-block_end = vmalloc_limit;
-pr_cont(" to -%pa", &block_end);
-memblock_remove(vmalloc_limit, overlap_size);
-should_use_highmem = true;
-}
-}
-if (!highmem) {
-if (block_end > arm_lowmem_limit) {
-if (reg->size > size_limit)
-arm_lowmem_limit = vmalloc_limit;
-else
-arm_lowmem_limit = block_end;
-}
+if (reg->base < vmalloc_limit) {
+if (block_end > lowmem_limit)
+/*
+ * Compare as u64 to ensure vmalloc_limit does
+ * not get truncated. block_end should always
+ * fit in phys_addr_t so there should be no
+ * issue with assignment.
+ */
+lowmem_limit = min_t(u64,
+vmalloc_limit,
+block_end);
 /*
 * Find the first non-pmd-aligned page, and point
@@ -1227,14 +1201,13 @@ void __init sanity_check_meminfo(void)
 if (!IS_ALIGNED(block_start, PMD_SIZE))
 memblock_limit = block_start;
 else if (!IS_ALIGNED(block_end, PMD_SIZE))
-memblock_limit = arm_lowmem_limit;
+memblock_limit = lowmem_limit;
 }
 }
 }
-if (should_use_highmem)
-pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+arm_lowmem_limit = lowmem_limit;
 high_memory = __va(arm_lowmem_limit - 1) + 1;
@@ -1248,6 +1221,18 @@ void __init sanity_check_meminfo(void)
 if (!memblock_limit)
 memblock_limit = arm_lowmem_limit;
+if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
+if (memblock_end_of_DRAM() > arm_lowmem_limit) {
+phys_addr_t end = memblock_end_of_DRAM();
+pr_notice("Ignoring RAM at %pa-%pa\n",
+&memblock_limit, &end);
+pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+memblock_remove(memblock_limit, end - memblock_limit);
+}
+}
 memblock_set_current_limit(memblock_limit);
 }

@@ -85,7 +85,7 @@ static unsigned long irbar_read(void)
 }
 /* MPU initialisation functions */
-void __init sanity_check_meminfo_mpu(void)
+void __init adjust_lowmem_bounds_mpu(void)
 {
 phys_addr_t phys_offset = PHYS_OFFSET;
 phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
@@ -274,7 +274,7 @@ void __init mpu_setup(void)
 }
 }
 #else
-static void sanity_check_meminfo_mpu(void) {}
+static void adjust_lowmem_bounds_mpu(void) {}
 static void __init mpu_setup(void) {}
 #endif /* CONFIG_ARM_MPU */
@@ -295,10 +295,10 @@ void __init arm_mm_memblock_reserve(void)
 #endif
 }
-void __init sanity_check_meminfo(void)
+void __init adjust_lowmem_bounds(void)
 {
 phys_addr_t end;
-sanity_check_meminfo_mpu();
+adjust_lowmem_bounds_mpu();
 end = memblock_end_of_DRAM();
 high_memory = __va(end - 1) + 1;
 memblock_set_current_limit(end);

@@ -0,0 +1,13 @@
+#ifndef __ASM_ASM_UACCESS_H
+#define __ASM_ASM_UACCESS_H
+
+/*
+ * Remove the address tag from a virtual address, if present.
+ */
+.macro clear_address_tag, dst, addr
+tst \addr, #(1 << 55)
+bic \dst, \addr, #(0xff << 56)
+csel \dst, \dst, \addr, eq
+.endm
+
+#endif

@@ -94,6 +94,10 @@
 #define SCTLR_ELx_A (1 << 1)
 #define SCTLR_ELx_M 1
+#define SCTLR_EL2_RES1 ((1 << 4) | (1 << 5) | (1 << 11) | (1 << 16) | \
+ (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+ (1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
 SCTLR_ELx_SA | SCTLR_ELx_I)

@@ -109,9 +109,9 @@ static inline void set_fs(mm_segment_t fs)
 })
 /*
- * When dealing with data aborts or instruction traps we may end up with
- * a tagged userland pointer. Clear the tag to get a sane pointer to pass
- * on to access_ok(), for instance.
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
 */
 #define untagged_addr(addr) sign_extend64(addr, 55)

@@ -32,6 +32,7 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/uaccess.h>
+#include <asm/asm-uaccess.h>
 #include <asm/unistd.h>
 /*
@@ -428,12 +429,13 @@ el1_da:
 /*
 * Data abort handling
 */
-mrs x0, far_el1
+mrs x3, far_el1
 enable_dbg
 // re-enable interrupts if they were enabled in the aborted context
 tbnz x23, #7, 1f // PSR_I_BIT
 enable_irq
 1:
+clear_address_tag x0, x3
 mov x2, sp // struct pt_regs
 bl do_mem_abort
@@ -594,7 +596,7 @@ el0_da:
 // enable interrupts before calling the main handler
 enable_dbg_and_irq
 ct_user_exit
-bic x0, x26, #(0xff << 56)
+clear_address_tag x0, x26
 mov x1, x25
 mov x2, sp
 bl do_mem_abort

@@ -36,6 +36,7 @@
 #include <asm/traps.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
+#include <asm/uaccess.h>
 /* Breakpoint currently in use for each BRP. */
 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

@@ -435,7 +435,7 @@ int cpu_enable_cache_maint_trap(void *__unused)
 }
 #define __user_cache_maint(insn, address, res) \
-if (untagged_addr(address) >= user_addr_max()) { \
+if (address >= user_addr_max()) { \
 res = -EFAULT; \
 } else { \
 uaccess_ttbr0_enable(); \
@@ -461,7 +461,7 @@ static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 int ret = 0;
-address = (rt == 31) ? 0 : regs->regs[rt];
+address = (rt == 31) ? 0 : untagged_addr(regs->regs[rt]);
 switch (crm) {
 case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */

@@ -102,10 +102,13 @@ __do_hyp_init:
 tlbi alle2
 dsb sy
-mrs x4, sctlr_el2
-and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2
-ldr x5, =SCTLR_ELx_FLAGS
-orr x4, x4, x5
+/*
+ * Preserve all the RES1 bits while setting the default flags,
+ * as well as the EE bit on BE. Drop the A flag since the compiler
+ * is allowed to generate unaligned accesses.
+ */
+ldr x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
 msr sctlr_el2, x4
 isb

@@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
 extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
+static inline int early_cpu_to_node(int cpu)
+{
+int nid;
+
+nid = numa_cpu_lookup_table[cpu];
+
+/*
+ * Fall back to node 0 if nid is unset (it should be, except bugs).
+ * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
+ */
+return (nid < 0) ? 0 : nid;
+}
+
 #else
+static inline int early_cpu_to_node(int cpu) { return 0; }
+
 static inline void dump_numa_cpu_topology(void) {}
 static inline int sysfs_add_device_to_node(struct device *dev, int nid)

@@ -1659,6 +1659,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
 current->thread.used_vsr = 0;
 #endif
+current->thread.load_fp = 0;
 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
 current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1667,6 +1668,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 current->thread.vr_save_area = NULL;
 current->thread.vrsave = 0;
 current->thread.used_vr = 0;
+current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
 memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1678,6 +1680,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 current->thread.tm_tfhar = 0;
 current->thread.tm_texasr = 0;
 current->thread.tm_tfiar = 0;
+current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);

@@ -595,7 +595,7 @@ void __init emergency_stack_init(void)
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
 __pa(MAX_DMA_ADDRESS));
 }
@@ -606,7 +606,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-if (cpu_to_node(from) == cpu_to_node(to))
+if (early_cpu_to_node(from) == early_cpu_to_node(to))
 return LOCAL_DISTANCE;
 else
 return REMOTE_DISTANCE;

@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
 for (i = 0; i < num_lmbs; i++) {
 lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
 lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
 lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
 }
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
 for (i = 0; i < num_lmbs; i++) {
 lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
 lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
 lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
 }

@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+struct u8_gpio_chip *u8_gc =
+container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 u8_gc->data = in_8(mm_gc->regs);
 }

@@ -187,9 +187,9 @@ config NR_CPUS
 int "Maximum number of CPUs"
 depends on SMP
 range 2 32 if SPARC32
-range 2 1024 if SPARC64
+range 2 4096 if SPARC64
 default 32 if SPARC32
-default 64 if SPARC64
+default 4096 if SPARC64
 source kernel/Kconfig.hz

@@ -52,7 +52,7 @@
 #define CTX_NR_MASK TAG_CONTEXT_BITS
 #define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
-#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx) \
 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)

@@ -17,13 +17,8 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
@@ -74,8 +69,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 unsigned long ctx_valid, flags;
-int cpu;
+int cpu = smp_processor_id();
+per_cpu(per_cpu_secondary_mm, cpu) = mm;
 if (unlikely(mm == &init_mm))
 return;
@@ -121,7 +117,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 * for the first time, we must flush that context out of the
 * local TLB.
 */
-cpu = smp_processor_id();
 if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 cpumask_set_cpu(cpu, mm_cpumask(mm));
 __flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -131,26 +126,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 }
 #define deactivate_mm(tsk,mm) do { } while (0)
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-unsigned long flags;
-int cpu;
-
-spin_lock_irqsave(&mm->context.lock, flags);
-if (!CTX_VALID(mm->context))
-get_new_mmu_context(mm);
-cpu = smp_processor_id();
-if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-load_secondary_context(mm);
-__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-tsb_context_switch(mm);
-spin_unlock_irqrestore(&mm->context.lock, flags);
-}
 #endif /* !(__ASSEMBLY__) */
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */

@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC 1
 #define PIL_SMP_RECEIVE_SIGNAL 2
 #define PIL_SMP_CAPTURE 3
-#define PIL_SMP_CTX_NEW_VERSION 4
 #define PIL_DEVICE_IRQ 5
 #define PIL_SMP_CALL_FUNC_SNGL 6
 #define PIL_DEFERRED_PCR_WORK 7

@@ -327,6 +327,7 @@ struct vio_dev {
 int compat_len;
 u64 dev_no;
+u64 id;
 unsigned long channel_id;

@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
 unsigned long page;
+void *mondo, *p;
-BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+/* Make sure mondo block is 64byte aligned */
+p = kzalloc(127, GFP_KERNEL);
+if (!p) {
+prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+prom_halt();
+}
+mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+tb->cpu_mondo_block_pa = __pa(mondo);
 page = get_zeroed_page(GFP_KERNEL);
 if (!page) {
-prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
 prom_halt();
 }
-tb->cpu_mondo_block_pa = __pa(page);
-tb->cpu_list_pa = __pa(page + 64);
+tb->cpu_list_pa = __pa(page);
 #endif
 }

@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);

@@ -963,37 +963,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 preempt_enable();
 }
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
-struct mm_struct *mm;
-unsigned long flags;
-clear_softint(1 << irq);
-/* See if we need to allocate a new TLB context because
- * the version of the one we are using is now out of date.
- */
-mm = current->active_mm;
-if (unlikely(!mm || (mm == &init_mm)))
-return;
-spin_lock_irqsave(&mm->context.lock, flags);
-if (unlikely(!CTX_VALID(mm->context)))
-get_new_mmu_context(mm);
-spin_unlock_irqrestore(&mm->context.lock, flags);
-load_secondary_context(mm);
-__flush_tlb_mm(CTX_HWBITS(mm->context),
-SECONDARY_CONTEXT);
-}
-void smp_new_mmu_context_version(void)
-{
-smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
 #ifdef CONFIG_KGDB
 void kgdb_roundup_cpus(unsigned long flags)
 {

@@ -470,13 +470,16 @@ __tsb_context_switch:
 .type copy_tsb,#function
 copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
 * %o2=new_tsb_base, %o3=new_tsb_size
+ * %o4=page_size_shift
 */
 sethi %uhi(TSB_PASS_BITS), %g7
 srlx %o3, 4, %o3
-add %o0, %o1, %g1 /* end of old tsb */
+add %o0, %o1, %o1 /* end of old tsb */
 sllx %g7, 32, %g7
 sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
+mov %o4, %g1 /* page_size_shift */
 661: prefetcha [%o0] ASI_N, #one_read
 .section .tsb_phys_patch, "ax"
 .word 661b
@@ -501,9 +504,9 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
 /* This can definitely be computed faster... */
 srlx %o0, 4, %o5 /* Build index */
 and %o5, 511, %o5 /* Mask index */
-sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+sllx %o5, %g1, %o5 /* Put into vaddr position */
 or %o4, %o5, %o4 /* Full VADDR. */
-srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+srlx %o4, %g1, %o4 /* Shift down to create index */
 and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
 sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
 TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
@@ -511,7 +514,7 @@ copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
 TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
 80: add %o0, 16, %o0
-cmp %o0, %g1
+cmp %o0, %o1
 bne,pt %xcc, 90b
 nop

@@ -50,7 +50,7 @@ tl0_resv03e: BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
 tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
 tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
 tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4: BTRAP(0x44)
 #else
 tl0_irq1: BTRAP(0x41)
 tl0_irq2: BTRAP(0x42)

@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
 if (!id) {
 dev_set_name(&vdev->dev, "%s", bus_id_name);
 vdev->dev_no = ~(u64)0;
+vdev->id = ~(u64)0;
 } else if (!cfg_handle) {
 dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
 vdev->dev_no = *id;
+vdev->id = ~(u64)0;
 } else {
 dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
 *cfg_handle, *id);
 vdev->dev_no = *cfg_handle;
+vdev->id = *id;
 }
 vdev->dev.parent = parent;
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
 (void) vio_create_one(hp, node, &root_vdev->dev);
 }
+struct vio_md_node_query {
+const char *type;
+u64 dev_no;
+u64 id;
+};
+
 static int vio_md_node_match(struct device *dev, void *arg)
 {
+struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
 struct vio_dev *vdev = to_vio_dev(dev);
-if (vdev->mp == (u64) arg)
-return 1;
-return 0;
+if (vdev->dev_no != query->dev_no)
+return 0;
+if (vdev->id != query->id)
+return 0;
+if (strcmp(vdev->type, query->type))
+return 0;
+return 1;
 }
 static void vio_remove(struct mdesc_handle *hp, u64 node)
 {
+const char *type;
+const u64 *id, *cfg_handle;
+u64 a;
+struct vio_md_node_query query;
 struct device *dev;
-dev = device_find_child(&root_vdev->dev, (void *) node,
+type = mdesc_get_property(hp, node, "device-type", NULL);
+if (!type) {
+type = mdesc_get_property(hp, node, "name", NULL);
+if (!type)
+type = mdesc_node_name(hp, node);
+}
+
+query.type = type;
+
+id = mdesc_get_property(hp, node, "id", NULL);
+cfg_handle = NULL;
+mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+u64 target;
+
+target = mdesc_arc_target(hp, a);
+cfg_handle = mdesc_get_property(hp, target,
+"cfg-handle", NULL);
+if (cfg_handle)
+break;
+}
+
+if (!id) {
+query.dev_no = ~(u64)0;
+query.id = ~(u64)0;
+} else if (!cfg_handle) {
+query.dev_no = *id;
+query.id = ~(u64)0;
+} else {
+query.dev_no = *cfg_handle;
+query.id = *id;
+}
+
+dev = device_find_child(&root_vdev->dev, &query,
 vio_md_node_match);
 if (dev) {
 printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
 device_unregister(dev);
 put_device(dev);
+} else {
+if (!id)
+printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+type);
+else if (!cfg_handle)
+printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+type, *id);
+else
+printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+type, *cfg_handle, *id);
 }
 }

@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o

arch/sparc/lib/multi3.S (new file, 35 lines)

@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+.text
+.align 4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+mov %o1, %g1
+srl %o3, 0, %g4
+mulx %g4, %g1, %o1
+srlx %g1, 0x20, %g3
+mulx %g3, %g4, %g5
+sllx %g5, 0x20, %o5
+srl %g1, 0, %g4
+sub %o1, %o5, %o5
+srlx %o5, 0x20, %o5
+addcc %g5, %o5, %g5
+srlx %o3, 0x20, %o5
+mulx %g4, %o5, %g4
+mulx %g3, %o5, %o5
+sethi %hi(0x80000000), %g3
+addcc %g5, %g4, %g5
+srlx %g5, 0x20, %g5
+add %g3, %g3, %g3
+movcc %xcc, %g0, %g3
+addcc %o5, %g5, %o5
+sllx %g4, 0x20, %g4
+add %o1, %g4, %o1
+add %o5, %g3, %g2
+mulx %g1, %o2, %g1
+add %g1, %g2, %g1
+mulx %o0, %o3, %o0
+retl
+add %g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)

@@ -658,10 +658,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
 /* get_new_mmu_context() uses "cache + 1". */
 DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR (1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+unsigned long new_ver, new_ctx, old_ctx;
+struct mm_struct *mm;
+int cpu;
+
+bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+/* Reserve kernel context */
+set_bit(0, mmu_context_bmap);
+
+new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+if (unlikely(new_ver == 0))
+new_ver = CTX_FIRST_VERSION;
+tlb_context_cache = new_ver;
+
+/*
+ * Make sure that any new mm that are added into per_cpu_secondary_mm,
+ * are going to go through get_new_mmu_context() path.
+ */
+mb();
+
+/*
+ * Updated versions to current on those CPUs that had valid secondary
+ * contexts
+ */
+for_each_online_cpu(cpu) {
+/*
+ * If a new mm is stored after we took this mm from the array,
+ * it will go into get_new_mmu_context() path, because we
+ * already bumped the version in tlb_context_cache.
+ */
+mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+if (unlikely(!mm || mm == &init_mm))
+continue;
+
+old_ctx = mm->context.sparc64_ctx_val;
+if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+mm->context.sparc64_ctx_val = new_ctx;
+}
+}
+}
 /* Caller does TLB context flushing on local CPU if necessary.
 * The caller also ensures that CTX_VALID(mm->context) is false.
@@ -677,48 +725,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
 unsigned long ctx, new_ctx;
 unsigned long orig_pgsz_bits;
-int new_version;
 spin_lock(&ctx_alloc_lock);
+retry:
+/* wrap might have happened, test again if our context became valid */
+if (unlikely(CTX_VALID(mm->context)))
+goto out;
 orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
 ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
 new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-new_version = 0;
 if (new_ctx >= (1 << CTX_NR_BITS)) {
 new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
 if (new_ctx >= ctx) {
-int i;
-new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-CTX_FIRST_VERSION;
-if (new_ctx == 1)
-new_ctx = CTX_FIRST_VERSION;
-
-/* Don't call memset, for 16 entries that's just
- * plain silly...
- */
-mmu_context_bmap[0] = 3;
-mmu_context_bmap[1] = 0;
-mmu_context_bmap[2] = 0;
-mmu_context_bmap[3] = 0;
-for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-mmu_context_bmap[i + 0] = 0;
-mmu_context_bmap[i + 1] = 0;
-mmu_context_bmap[i + 2] = 0;
-mmu_context_bmap[i + 3] = 0;
-}
-new_version = 1;
-goto out;
+mmu_context_wrap();
+goto retry;
 }
 }
+
+if (mm->context.sparc64_ctx_val)
+cpumask_clear(mm_cpumask(mm));
 mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
 new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
 tlb_context_cache = new_ctx;
 mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
 spin_unlock(&ctx_alloc_lock);
-
-if (unlikely(new_version))
-smp_new_mmu_context_version();
 }
 static int numa_enabled = 1;

@@ -451,7 +451,8 @@ retry_tsb_alloc:
 extern void copy_tsb(unsigned long old_tsb_base,
 unsigned long old_tsb_size,
 unsigned long new_tsb_base,
-unsigned long new_tsb_size);
+unsigned long new_tsb_size,
+unsigned long page_size_shift);
 unsigned long old_tsb_base = (unsigned long) old_tsb;
 unsigned long new_tsb_base = (unsigned long) new_tsb;
@@ -459,7 +460,9 @@ retry_tsb_alloc:
 old_tsb_base = __pa(old_tsb_base);
 new_tsb_base = __pa(new_tsb_base);
 }
-copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+tsb_index == MM_TSB_BASE ?
+PAGE_SHIFT : REAL_HPAGE_SHIFT);
 }
 mm->context.tsb_block[tsb_index].tsb = new_tsb;

@@ -971,11 +971,6 @@ xcall_capture:
 wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
 retry
-.globl xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
-wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
-retry
-
 #ifdef CONFIG_KGDB
 .globl xcall_kgdb_capture
 xcall_kgdb_capture:

@@ -162,8 +162,8 @@ void kvm_async_pf_task_wait(u32 token)
 */
 rcu_irq_exit();
 native_safe_halt();
-rcu_irq_enter();
 local_irq_disable();
+rcu_irq_enter();
 }
 }
 if (!n.halted)

@@ -765,18 +765,20 @@ out:
 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 {
 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-int j, nent = vcpu->arch.cpuid_nent;
+struct kvm_cpuid_entry2 *ej;
+int j = i;
+int nent = vcpu->arch.cpuid_nent;
 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
 /* when no next entry is found, the current entry[i] is reselected */
-for (j = i + 1; ; j = (j + 1) % nent) {
-struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-if (ej->function == e->function) {
-ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-return j;
-}
-}
-return 0; /* silence gcc, even though control never reaches here */
+do {
+j = (j + 1) % nent;
+ej = &vcpu->arch.cpuid_entries[j];
+} while (ej->function != e->function);
+
+ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+return j;
 }
 /* find an entry with matching function, matching index (if needed), and that

@@ -3489,12 +3489,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
 {
 if (unlikely(!lapic_in_kernel(vcpu) ||
 kvm_event_needs_reinjection(vcpu)))
 return false;
+if (is_guest_mode(vcpu))
+return false;
+
 return kvm_x86_ops->interrupt_allowed(vcpu);
 }
@@ -3510,7 +3513,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 if (!async)
 return false; /* *pfn has correct page already */
-if (!prefault && can_do_async_pf(vcpu)) {
+if (!prefault && kvm_can_do_async_pf(vcpu)) {
 trace_kvm_try_async_get_page(gva, gfn);
 if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 trace_kvm_async_pf_doublefault(gva, gfn);

@@ -75,6 +75,7 @@ enum {
 int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {

@@ -8444,8 +8444,7 @@ bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
 return true;
 else
-return !kvm_event_needs_reinjection(vcpu) &&
-kvm_x86_ops->interrupt_allowed(vcpu);
+return kvm_can_do_async_pf(vcpu);
 }
 void kvm_arch_start_assignment(struct kvm *kvm)

@@ -358,6 +358,9 @@ void __init efi_free_boot_services(void)
 free_bootmem_late(start, size);
 }
+if (!num_entries)
+return;
+
 new_size = efi.memmap.desc_size * num_entries;
 new_phys = efi_memmap_alloc(num_entries);
 if (!new_phys) {

@@ -36,9 +36,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
 static const int cfq_hist_divisor = 4;
 /*
- * offset from end of service tree
+ * offset from end of queue service tree for idle class
 */
 #define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service tree under time slice mode */
+#define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5)
+/* offset from end of group service under IOPS mode */
+#define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5)
 /*
 * below this threshold, we consider thinktime immediate
@@ -1370,6 +1374,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
 cfqg->vfraction = max_t(unsigned, vfr, 1);
 }
+static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd)
+{
+if (!iops_mode(cfqd))
+return CFQ_SLICE_MODE_GROUP_DELAY;
+else
+return CFQ_IOPS_MODE_GROUP_DELAY;
+}
+
 static void
 cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
@@ -1389,7 +1401,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 n = rb_last(&st->rb);
 if (n) {
 __cfqg = rb_entry_cfqg(n);
-cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
+cfqg->vdisktime = __cfqg->vdisktime +
+cfq_get_cfqg_vdisktime_delay(cfqd);
 } else
 cfqg->vdisktime = st->min_vdisktime;
 cfq_group_service_tree_add(st, cfqg);

@@ -140,7 +140,7 @@ int public_key_verify_signature(const struct public_key *pkey,
 * signature and returns that to us.
 */
 ret = crypto_akcipher_verify(req);
-if (ret == -EINPROGRESS) {
+if ((ret == -EINPROGRESS) || (ret == -EBUSY)) {
 wait_for_completion(&compl.completion);
 ret = compl.err;
 }

@@ -1768,9 +1768,8 @@ static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
 break;
 case -EINPROGRESS:
 case -EBUSY:
-ret = wait_for_completion_interruptible(
-&drbg->ctr_completion);
-if (!ret && !drbg->ctr_async_err) {
+wait_for_completion(&drbg->ctr_completion);
+if (!drbg->ctr_async_err) {
 reinit_completion(&drbg->ctr_completion);
 break;
 }

@@ -152,10 +152,8 @@ static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 err = crypto_skcipher_encrypt(&data->req);
 if (err == -EINPROGRESS || err == -EBUSY) {
-err = wait_for_completion_interruptible(
-&data->result.completion);
-if (!err)
-err = data->result.err;
+wait_for_completion(&data->result.completion);
+err = data->result.err;
 }
 if (err)

@@ -1362,6 +1362,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
 {}
 #endif
+/*
+ * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected
+ * as DUMMY, or detected but eventually get a "link down" and never get up
+ * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the
+ * port_map may hold a value of 0x00.
+ *
+ * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports
+ * and can significantly reduce the occurrence of the problem.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189471
+ */
+static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv,
+struct pci_dev *pdev)
+{
+static const struct dmi_system_id sysids[] = {
+{
+.ident = "Acer Switch Alpha 12",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271")
+},
+},
+{ }
+};
+
+if (dmi_check_system(sysids)) {
+dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n");
+if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) {
+hpriv->port_map = 0x7;
+hpriv->cap = 0xC734FF02;
+}
+}
+}
+
 #ifdef CONFIG_ARM64
 /*
 * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
@@ -1597,6 +1631,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 "online status unreliable, applying workaround\n");
 }
+/* Acer SA5-271 workaround modifies private_data */
+acer_sa5_271_workaround(hpriv, pdev);
+
 /* CAP.NP sometimes indicate the index of the last enabled
 * port, at other times, that of the last possible port, so
 * determining the maximum port number requires looking at

@@ -343,7 +343,7 @@ static int mmap_mem(struct file *file, struct vm_area_struct *vma)
 phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 /* It's illegal to wrap around the end of the physical address space. */
-if (offset + (phys_addr_t)size < offset)
+if (offset + (phys_addr_t)size - 1 < offset)
 return -EINVAL;
 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))

@@ -2539,6 +2539,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
 list_empty(&cpufreq_policy_list)) {
 /* if all ->init() calls failed, unregister */
+ret = -ENODEV;
 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
 driver_data->name);
 goto err_if_unreg;

@@ -201,6 +201,7 @@ struct ep93xx_dma_engine {
 struct dma_device dma_dev;
 bool m2m;
 int (*hw_setup)(struct ep93xx_dma_chan *);
+void (*hw_synchronize)(struct ep93xx_dma_chan *);
 void (*hw_shutdown)(struct ep93xx_dma_chan *);
 void (*hw_submit)(struct ep93xx_dma_chan *);
 int (*hw_interrupt)(struct ep93xx_dma_chan *);
@@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
 | M2P_CONTROL_ENABLE;
 m2p_set_control(edmac, control);
+edmac->buffer = 0;
+
 return 0;
 }
@@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
 return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
 }
-static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
 {
+unsigned long flags;
 u32 control;
+spin_lock_irqsave(&edmac->lock, flags);
 control = readl(edmac->regs + M2P_CONTROL);
 control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
 m2p_set_control(edmac, control);
+spin_unlock_irqrestore(&edmac->lock, flags);
 while (m2p_channel_state(edmac) >= M2P_STATE_ON)
-cpu_relax();
+schedule();
+}
+
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
 m2p_set_control(edmac, 0);
-while (m2p_channel_state(edmac) == M2P_STATE_STALL)
-cpu_relax();
+while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
+dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
 }
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
@@ -1160,6 +1169,26 @@ fail:
 return NULL;
 }
+/**
+ * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
+ * current context.
+ * @chan: channel
+ *
+ * Synchronizes the DMA channel termination to the current context. When this
+ * function returns it is guaranteed that all transfers for previously issued
+ * descriptors have stopped and and it is safe to free the memory associated
+ * with them. Furthermore it is guaranteed that all complete callback functions
+ * for a previously submitted descriptor have finished running and it is safe to
+ * free resources accessed from within the complete callbacks.
+ */
+static void ep93xx_dma_synchronize(struct dma_chan *chan)
+{
+struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+if (edmac->edma->hw_synchronize)
+edmac->edma->hw_synchronize(edmac);
+}
+
 /**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
@@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
 dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
 dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
 dma_dev->device_config = ep93xx_dma_slave_config;
+dma_dev->device_synchronize = ep93xx_dma_synchronize;
 dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
 dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
 dma_dev->device_tx_status = ep93xx_dma_tx_status;
@@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev)
 } else {
 dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+edma->hw_synchronize = m2p_hw_synchronize;
 edma->hw_setup = m2p_hw_setup;
 edma->hw_shutdown = m2p_hw_shutdown;
 edma->hw_submit = m2p_hw_submit;


@ -161,6 +161,7 @@ struct mv_xor_v2_device {
struct mv_xor_v2_sw_desc *sw_desq; struct mv_xor_v2_sw_desc *sw_desq;
int desc_size; int desc_size;
unsigned int npendings; unsigned int npendings;
unsigned int hw_queue_idx;
}; };
/** /**
@ -213,18 +214,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
} }
} }
/*
* Return the next available index in the DESQ.
*/
static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
{
/* read the index for the next available descriptor in the DESQ */
u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
& MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
}
/* /*
* notify the engine of new descriptors, and update the available index. * notify the engine of new descriptors, and update the available index.
*/ */
@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev)
return MV_XOR_V2_EXT_DESC_SIZE; return MV_XOR_V2_EXT_DESC_SIZE;
} }
/*
* Set the IMSG threshold
*/
static inline
void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val)
{
u32 reg;
reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT);
writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF);
}
static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
{ {
struct mv_xor_v2_device *xor_dev = data; struct mv_xor_v2_device *xor_dev = data;
@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
if (!ndescs) if (!ndescs)
return IRQ_NONE; return IRQ_NONE;
/*
* Update IMSG threshold, to disable new IMSG interrupts until
* end of the tasklet
*/
mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM);
/* schedule a tasklet to handle descriptors callbacks */ /* schedule a tasklet to handle descriptors callbacks */
tasklet_schedule(&xor_dev->irq_tasklet); tasklet_schedule(&xor_dev->irq_tasklet);
@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
static dma_cookie_t static dma_cookie_t
mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
{ {
int desq_ptr;
void *dest_hw_desc; void *dest_hw_desc;
dma_cookie_t cookie; dma_cookie_t cookie;
struct mv_xor_v2_sw_desc *sw_desc = struct mv_xor_v2_sw_desc *sw_desc =
@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_bh(&xor_dev->lock); spin_lock_bh(&xor_dev->lock);
cookie = dma_cookie_assign(tx); cookie = dma_cookie_assign(tx);
/* get the next available slot in the DESQ */
desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
/* copy the HW descriptor from the SW descriptor to the DESQ */ /* copy the HW descriptor from the SW descriptor to the DESQ */
dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr; dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size); memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
xor_dev->npendings++; xor_dev->npendings++;
xor_dev->hw_queue_idx++;
if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
xor_dev->hw_queue_idx = 0;
spin_unlock_bh(&xor_dev->lock); spin_unlock_bh(&xor_dev->lock);
@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc *
mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
{ {
struct mv_xor_v2_sw_desc *sw_desc; struct mv_xor_v2_sw_desc *sw_desc;
bool found = false;
/* Lock the channel */ /* Lock the channel */
spin_lock_bh(&xor_dev->lock); spin_lock_bh(&xor_dev->lock);
@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev)
return NULL; return NULL;
} }
/* get a free SW descriptor from the SW DESQ */ list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) {
sw_desc = list_first_entry(&xor_dev->free_sw_desc, if (async_tx_test_ack(&sw_desc->async_tx)) {
struct mv_xor_v2_sw_desc, free_list); found = true;
break;
}
}
if (!found) {
spin_unlock_bh(&xor_dev->lock);
return NULL;
}
list_del(&sw_desc->free_list); list_del(&sw_desc->free_list);
/* Release the channel */ /* Release the channel */
spin_unlock_bh(&xor_dev->lock); spin_unlock_bh(&xor_dev->lock);
/* set the async tx descriptor */
dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan);
sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
async_tx_ack(&sw_desc->async_tx);
return sw_desc; return sw_desc;
} }
@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
__func__, len, &src, &dest, flags); __func__, len, &src, &dest, flags);
sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
if (!sw_desc)
return NULL;
sw_desc->async_tx.flags = flags; sw_desc->async_tx.flags = flags;
@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
__func__, src_cnt, len, &dest, flags); __func__, src_cnt, len, &dest, flags);
sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
if (!sw_desc)
return NULL;
sw_desc->async_tx.flags = flags; sw_desc->async_tx.flags = flags;
@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
container_of(chan, struct mv_xor_v2_device, dmachan); container_of(chan, struct mv_xor_v2_device, dmachan);
sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); sw_desc = mv_xor_v2_prep_sw_desc(xor_dev);
if (!sw_desc)
return NULL;
/* set the HW descriptor */ /* set the HW descriptor */
hw_descriptor = &sw_desc->hw_desc; hw_descriptor = &sw_desc->hw_desc;
@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data)
{ {
struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data; struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data;
int pending_ptr, num_of_pending, i; int pending_ptr, num_of_pending, i;
struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL;
struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL; struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL;
dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__); dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__);
@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data)
/* get the pending descriptors parameters */ /* get the pending descriptors parameters */
num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr); num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr);
/* next HW descriptor */
next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr;
/* loop over free descriptors */ /* loop over free descriptors */
for (i = 0; i < num_of_pending; i++) { for (i = 0; i < num_of_pending; i++) {
struct mv_xor_v2_descriptor *next_pending_hw_desc =
if (pending_ptr > MV_XOR_V2_DESC_NUM) xor_dev->hw_desq_virt + pending_ptr;
pending_ptr = 0;
if (next_pending_sw_desc != NULL)
next_pending_hw_desc++;
/* get the SW descriptor related to the HW descriptor */ /* get the SW descriptor related to the HW descriptor */
next_pending_sw_desc = next_pending_sw_desc =
@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data)
/* increment the next descriptor */ /* increment the next descriptor */
pending_ptr++; pending_ptr++;
if (pending_ptr >= MV_XOR_V2_DESC_NUM)
pending_ptr = 0;
} }
if (num_of_pending != 0) { if (num_of_pending != 0) {
/* free the descriptores */ /* free the descriptores */
mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending); mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending);
} }
/* Update IMSG threshold, to enable new IMSG interrupts */
mv_xor_v2_set_imsg_thrd(xor_dev, 0);
} }
/* /*
@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
/* enable the DMA engine */
writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
/* /*
* This is a temporary solution, until we activate the * This is a temporary solution, until we activate the
* SMMU. Set the attributes for reading & writing data buffers * SMMU. Set the attributes for reading & writing data buffers
@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL; reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL;
writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE);
/* enable the DMA engine */
writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF);
return 0; return 0;
} }
@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, xor_dev); platform_set_drvdata(pdev, xor_dev);
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (ret)
return ret;
xor_dev->clk = devm_clk_get(&pdev->dev, NULL); xor_dev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER; return -EPROBE_DEFER;
@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
/* add all SW descriptors to the free list */ /* add all SW descriptors to the free list */
for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) { for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) {
xor_dev->sw_desq[i].idx = i; struct mv_xor_v2_sw_desc *sw_desc =
list_add(&xor_dev->sw_desq[i].free_list, xor_dev->sw_desq + i;
sw_desc->idx = i;
dma_async_tx_descriptor_init(&sw_desc->async_tx,
&xor_dev->dmachan);
sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit;
async_tx_ack(&sw_desc->async_tx);
list_add(&sw_desc->free_list,
&xor_dev->free_sw_desc); &xor_dev->free_sw_desc);
} }


@ -117,7 +117,7 @@ struct usb_dmac {
#define USB_DMASWR 0x0008 #define USB_DMASWR 0x0008
#define USB_DMASWR_SWR (1 << 0) #define USB_DMASWR_SWR (1 << 0)
#define USB_DMAOR 0x0060 #define USB_DMAOR 0x0060
#define USB_DMAOR_AE (1 << 2) #define USB_DMAOR_AE (1 << 1)
#define USB_DMAOR_DME (1 << 0) #define USB_DMAOR_DME (1 << 0)
#define USB_DMASAR 0x0000 #define USB_DMASAR 0x0000


@ -900,6 +900,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300; u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
/* disable mclk switching if the refresh is >120Hz, even if the
* blanking period would allow it
*/
if (amdgpu_dpm_get_vrefresh(adev) > 120)
return true;
if (vblank_time < switch_limit) if (vblank_time < switch_limit)
return true; return true;
else else


@ -379,7 +379,12 @@ EXPORT_SYMBOL(drm_put_dev);
void drm_unplug_dev(struct drm_device *dev) void drm_unplug_dev(struct drm_device *dev)
{ {
/* for a USB device */ /* for a USB device */
drm_dev_unregister(dev); if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_unregister_all(dev);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
drm_minor_unregister(dev, DRM_MINOR_CONTROL);
mutex_lock(&drm_global_mutex); mutex_lock(&drm_global_mutex);


@ -573,9 +573,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (i915_inject_load_failure()) if (i915_inject_load_failure())
return -ENODEV; return -ENODEV;
ret = intel_bios_init(dev_priv); intel_bios_init(dev_priv);
if (ret)
DRM_INFO("failed to find VBIOS tables\n");
/* If we have > 1 VGA cards, then we need to arbitrate access /* If we have > 1 VGA cards, then we need to arbitrate access
* to the common VGA resources. * to the common VGA resources.


@ -3584,7 +3584,7 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
extern void intel_i2c_reset(struct drm_device *dev); extern void intel_i2c_reset(struct drm_device *dev);
/* intel_bios.c */ /* intel_bios.c */
int intel_bios_init(struct drm_i915_private *dev_priv); void intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size); bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);


@ -1332,6 +1332,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
return; return;
} }
/* Common defaults which may be overridden by VBT. */
static void static void
init_vbt_defaults(struct drm_i915_private *dev_priv) init_vbt_defaults(struct drm_i915_private *dev_priv)
{ {
@ -1368,6 +1369,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
&dev_priv->vbt.ddi_port_info[port]; &dev_priv->vbt.ddi_port_info[port];
info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN; info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
}
}
/* Defaults to initialize only if there is no VBT. */
static void
init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
{
enum port port;
for (port = PORT_A; port < I915_MAX_PORTS; port++) {
struct ddi_vbt_port_info *info =
&dev_priv->vbt.ddi_port_info[port];
info->supports_dvi = (port != PORT_A && port != PORT_E); info->supports_dvi = (port != PORT_A && port != PORT_E);
info->supports_hdmi = info->supports_dvi; info->supports_hdmi = info->supports_dvi;
@ -1450,36 +1463,35 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
* intel_bios_init - find VBT and initialize settings from the BIOS * intel_bios_init - find VBT and initialize settings from the BIOS
* @dev_priv: i915 device instance * @dev_priv: i915 device instance
* *
* Loads the Video BIOS and checks that the VBT exists. Sets scratch registers * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
* to appropriate values. * was not found in ACPI OpRegion, try to find it in PCI ROM first. Also
* * initialize some defaults if the VBT is not present at all.
* Returns 0 on success, nonzero on failure.
*/ */
int void intel_bios_init(struct drm_i915_private *dev_priv)
intel_bios_init(struct drm_i915_private *dev_priv)
{ {
struct pci_dev *pdev = dev_priv->drm.pdev; struct pci_dev *pdev = dev_priv->drm.pdev;
const struct vbt_header *vbt = dev_priv->opregion.vbt; const struct vbt_header *vbt = dev_priv->opregion.vbt;
const struct bdb_header *bdb; const struct bdb_header *bdb;
u8 __iomem *bios = NULL; u8 __iomem *bios = NULL;
if (HAS_PCH_NOP(dev_priv)) if (HAS_PCH_NOP(dev_priv)) {
return -ENODEV; DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
return;
}
init_vbt_defaults(dev_priv); init_vbt_defaults(dev_priv);
/* If the OpRegion does not have VBT, look in PCI ROM. */
if (!vbt) { if (!vbt) {
size_t size; size_t size;
bios = pci_map_rom(pdev, &size); bios = pci_map_rom(pdev, &size);
if (!bios) if (!bios)
return -1; goto out;
vbt = find_vbt(bios, size); vbt = find_vbt(bios, size);
if (!vbt) { if (!vbt)
pci_unmap_rom(pdev, bios); goto out;
return -1;
}
DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n"); DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
} }
@ -1504,10 +1516,14 @@ intel_bios_init(struct drm_i915_private *dev_priv)
parse_mipi_sequence(dev_priv, bdb); parse_mipi_sequence(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb); parse_ddi_ports(dev_priv, bdb);
out:
if (!vbt) {
DRM_INFO("Failed to find VBIOS tables (VBT)\n");
init_vbt_missing_defaults(dev_priv);
}
if (bios) if (bios)
pci_unmap_rom(pdev, bios); pci_unmap_rom(pdev, bios);
return 0;
} }
/** /**


@ -801,6 +801,7 @@ static struct drm_driver msm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle, .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export, .gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import, .gem_prime_import = drm_gem_prime_import,
.gem_prime_res_obj = msm_gem_prime_res_obj,
.gem_prime_pin = msm_gem_prime_pin, .gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin, .gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table, .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,


@ -203,6 +203,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj); void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg); struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj); int msm_gem_prime_pin(struct drm_gem_object *obj);


@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
if (!obj->import_attach) if (!obj->import_attach)
msm_gem_put_pages(obj); msm_gem_put_pages(obj);
} }
struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
return msm_obj->resv;
}


@ -4,6 +4,7 @@
struct nvkm_alarm { struct nvkm_alarm {
struct list_head head; struct list_head head;
struct list_head exec;
u64 timestamp; u64 timestamp;
void (*func)(struct nvkm_alarm *); void (*func)(struct nvkm_alarm *);
}; };


@ -50,7 +50,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
/* Move to completed list. We'll drop the lock before /* Move to completed list. We'll drop the lock before
* executing the callback so it can reschedule itself. * executing the callback so it can reschedule itself.
*/ */
list_move_tail(&alarm->head, &exec); list_del_init(&alarm->head);
list_add(&alarm->exec, &exec);
} }
/* Shut down interrupt if no more pending alarms. */ /* Shut down interrupt if no more pending alarms. */
@ -59,8 +60,8 @@ nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
spin_unlock_irqrestore(&tmr->lock, flags); spin_unlock_irqrestore(&tmr->lock, flags);
/* Execute completed callbacks. */ /* Execute completed callbacks. */
list_for_each_entry_safe(alarm, atemp, &exec, head) { list_for_each_entry_safe(alarm, atemp, &exec, exec) {
list_del_init(&alarm->head); list_del(&alarm->exec);
alarm->func(alarm); alarm->func(alarm);
} }
} }


@ -368,6 +368,8 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
return fifo_state->static_buffer; return fifo_state->static_buffer;
else { else {
fifo_state->dynamic_buffer = vmalloc(bytes); fifo_state->dynamic_buffer = vmalloc(bytes);
if (!fifo_state->dynamic_buffer)
goto out_err;
return fifo_state->dynamic_buffer; return fifo_state->dynamic_buffer;
} }
} }


@ -1275,11 +1275,14 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret; int ret;
uint32_t size; uint32_t size;
uint32_t backup_handle; uint32_t backup_handle = 0;
if (req->multisample_count != 0) if (req->multisample_count != 0)
return -EINVAL; return -EINVAL;
if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
return -EINVAL;
if (unlikely(vmw_user_surface_size == 0)) if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128; 128;
@ -1315,12 +1318,16 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup, &res->backup,
&user_srf->backup_base); &user_srf->backup_base);
if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < if (ret == 0) {
res->backup_size) { if (res->backup->base.num_pages * PAGE_SIZE <
DRM_ERROR("Surface backup buffer is too small.\n"); res->backup_size) {
vmw_dmabuf_unreference(&res->backup); DRM_ERROR("Surface backup buffer is too small.\n");
ret = -EINVAL; vmw_dmabuf_unreference(&res->backup);
goto out_unlock; ret = -EINVAL;
goto out_unlock;
} else {
backup_handle = req->buffer_handle;
}
} }
} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile, ret = vmw_user_dmabuf_alloc(dev_priv, tfile,


@ -143,7 +143,7 @@ static void iproc_adc_reg_dump(struct iio_dev *indio_dev)
iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA); iproc_adc_dbg_reg(dev, adc_priv, IPROC_SOFT_BYPASS_DATA);
} }
static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data) static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
{ {
u32 channel_intr_status; u32 channel_intr_status;
u32 intr_status; u32 intr_status;
@ -167,7 +167,7 @@ static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
return IRQ_NONE; return IRQ_NONE;
} }
static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data) static irqreturn_t iproc_adc_interrupt_handler(int irq, void *data)
{ {
irqreturn_t retval = IRQ_NONE; irqreturn_t retval = IRQ_NONE;
struct iproc_adc_priv *adc_priv; struct iproc_adc_priv *adc_priv;
@ -181,7 +181,7 @@ static irqreturn_t iproc_adc_interrupt_thread(int irq, void *data)
adc_priv = iio_priv(indio_dev); adc_priv = iio_priv(indio_dev);
regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status); regmap_read(adc_priv->regmap, IPROC_INTERRUPT_STATUS, &intr_status);
dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_thread(),INTRPT_STS:%x\n", dev_dbg(&indio_dev->dev, "iproc_adc_interrupt_handler(),INTRPT_STS:%x\n",
intr_status); intr_status);
intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR; intr_channels = (intr_status & IPROC_ADC_INTR_MASK) >> IPROC_ADC_INTR;
@ -566,8 +566,8 @@ static int iproc_adc_probe(struct platform_device *pdev)
} }
ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno, ret = devm_request_threaded_irq(&pdev->dev, adc_priv->irqno,
iproc_adc_interrupt_thread,
iproc_adc_interrupt_handler, iproc_adc_interrupt_handler,
iproc_adc_interrupt_thread,
IRQF_SHARED, "iproc-adc", indio_dev); IRQF_SHARED, "iproc-adc", indio_dev);
if (ret) { if (ret) {
dev_err(&pdev->dev, "request_irq error %d\n", ret); dev_err(&pdev->dev, "request_irq error %d\n", ret);


@ -74,9 +74,9 @@ static const int int_time_mapping[] = {100000, 50000, 200000, 400000};
static const struct reg_field reg_field_it = static const struct reg_field reg_field_it =
REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4); REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
static const struct reg_field reg_field_als_intr = static const struct reg_field reg_field_als_intr =
REG_FIELD(LTR501_INTR, 0, 0);
static const struct reg_field reg_field_ps_intr =
REG_FIELD(LTR501_INTR, 1, 1); REG_FIELD(LTR501_INTR, 1, 1);
static const struct reg_field reg_field_ps_intr =
REG_FIELD(LTR501_INTR, 0, 0);
static const struct reg_field reg_field_als_rate = static const struct reg_field reg_field_als_rate =
REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2); REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
static const struct reg_field reg_field_ps_rate = static const struct reg_field reg_field_ps_rate =


@ -40,9 +40,9 @@
#define AS3935_AFE_PWR_BIT BIT(0) #define AS3935_AFE_PWR_BIT BIT(0)
#define AS3935_INT 0x03 #define AS3935_INT 0x03
#define AS3935_INT_MASK 0x07 #define AS3935_INT_MASK 0x0f
#define AS3935_EVENT_INT BIT(3) #define AS3935_EVENT_INT BIT(3)
#define AS3935_NOISE_INT BIT(1) #define AS3935_NOISE_INT BIT(0)
#define AS3935_DATA 0x07 #define AS3935_DATA 0x07
#define AS3935_DATA_MASK 0x3F #define AS3935_DATA_MASK 0x3F
@ -215,7 +215,7 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
st->buffer[0] = val & AS3935_DATA_MASK; st->buffer[0] = val & AS3935_DATA_MASK;
iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer, iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
pf->timestamp); iio_get_time_ns(indio_dev));
err_read: err_read:
iio_trigger_notify_done(indio_dev->trig); iio_trigger_notify_done(indio_dev->trig);
@ -244,7 +244,7 @@ static void as3935_event_work(struct work_struct *work)
switch (val) { switch (val) {
case AS3935_EVENT_INT: case AS3935_EVENT_INT:
iio_trigger_poll(st->trig); iio_trigger_poll_chained(st->trig);
break; break;
case AS3935_NOISE_INT: case AS3935_NOISE_INT:
dev_warn(&st->spi->dev, "noise level is too high\n"); dev_warn(&st->spi->dev, "noise level is too high\n");


@ -1118,8 +1118,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX32VD 0x361f02 00, 15, 0e clickpad * Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad * Avatar AVIU-145A2 0x361f00 ? clickpad
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
@ -1524,6 +1526,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"), DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
}, },
}, },
{
/* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
},
},
{ {
/* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */ /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
.matches = { .matches = {
@ -1545,6 +1554,13 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"), DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E556"),
}, },
}, },
{
/* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
},
},
{ {
/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */ /* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
.matches = { .matches = {


@ -155,11 +155,8 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
/* Do this outside the status_mutex to avoid a circular dependency with /* Do this outside the status_mutex to avoid a circular dependency with
* the locking in cxl_mmap_fault() */ * the locking in cxl_mmap_fault() */
if (copy_from_user(&work, uwork, if (copy_from_user(&work, uwork, sizeof(work)))
sizeof(struct cxl_ioctl_start_work))) { return -EFAULT;
rc = -EFAULT;
goto out;
}
mutex_lock(&ctx->status_mutex); mutex_lock(&ctx->status_mutex);
if (ctx->status != OPENED) { if (ctx->status != OPENED) {


@ -1066,13 +1066,16 @@ int cxl_native_register_psl_err_irq(struct cxl *adapter)
void cxl_native_release_psl_err_irq(struct cxl *adapter) void cxl_native_release_psl_err_irq(struct cxl *adapter)
{ {
if (adapter->native->err_virq != irq_find_mapping(NULL, adapter->native->err_hwirq)) if (adapter->native->err_virq == 0 ||
adapter->native->err_virq !=
irq_find_mapping(NULL, adapter->native->err_hwirq))
return; return;
cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
cxl_unmap_irq(adapter->native->err_virq, adapter); cxl_unmap_irq(adapter->native->err_virq, adapter);
cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq); cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
kfree(adapter->irq_name); kfree(adapter->irq_name);
adapter->native->err_virq = 0;
} }
int cxl_native_register_serr_irq(struct cxl_afu *afu) int cxl_native_register_serr_irq(struct cxl_afu *afu)
@ -1102,13 +1105,15 @@ int cxl_native_register_serr_irq(struct cxl_afu *afu)
void cxl_native_release_serr_irq(struct cxl_afu *afu) void cxl_native_release_serr_irq(struct cxl_afu *afu)
{ {
if (afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq)) if (afu->serr_virq == 0 ||
afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
return; return;
cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
cxl_unmap_irq(afu->serr_virq, afu); cxl_unmap_irq(afu->serr_virq, afu);
cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq); cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
kfree(afu->err_irq_name); kfree(afu->err_irq_name);
afu->serr_virq = 0;
} }
int cxl_native_register_psl_irq(struct cxl_afu *afu) int cxl_native_register_psl_irq(struct cxl_afu *afu)
@ -1131,12 +1136,15 @@ int cxl_native_register_psl_irq(struct cxl_afu *afu)
void cxl_native_release_psl_irq(struct cxl_afu *afu) void cxl_native_release_psl_irq(struct cxl_afu *afu)
{ {
if (afu->native->psl_virq != irq_find_mapping(NULL, afu->native->psl_hwirq)) if (afu->native->psl_virq == 0 ||
afu->native->psl_virq !=
irq_find_mapping(NULL, afu->native->psl_hwirq))
return; return;
cxl_unmap_irq(afu->native->psl_virq, afu); cxl_unmap_irq(afu->native->psl_virq, afu);
cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq); cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
kfree(afu->psl_irq_name); kfree(afu->psl_irq_name);
afu->native->psl_virq = 0;
} }
static void recover_psl_err(struct cxl_afu *afu, u64 errstat) static void recover_psl_err(struct cxl_afu *afu, u64 errstat)


@ -678,8 +678,10 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
{ {
struct mei_cl_device *cldev = to_mei_cl_device(dev); struct mei_cl_device *cldev = to_mei_cl_device(dev);
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl); const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
u8 version = mei_me_cl_ver(cldev->me_cl);
return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:", cldev->name, uuid); return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
cldev->name, uuid, version);
} }
static DEVICE_ATTR_RO(modalias); static DEVICE_ATTR_RO(modalias);


@ -1931,7 +1931,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
} }
/* select a non-FCoE queue */ /* select a non-FCoE queue */
return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
} }
void bnx2x_set_num_queues(struct bnx2x *bp) void bnx2x_set_num_queues(struct bnx2x *bp)


@ -2226,10 +2226,14 @@ static int cxgb_up(struct adapter *adap)
if (err) if (err)
goto irq_err; goto irq_err;
} }
mutex_lock(&uld_mutex);
enable_rx(adap); enable_rx(adap);
t4_sge_start(adap); t4_sge_start(adap);
t4_intr_enable(adap); t4_intr_enable(adap);
adap->flags |= FULL_INIT_DONE; adap->flags |= FULL_INIT_DONE;
mutex_unlock(&uld_mutex);
notify_ulds(adap, CXGB4_STATE_UP); notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6) #if IS_ENABLED(CONFIG_IPV6)
update_clip(adap); update_clip(adap);


@ -710,6 +710,8 @@ static int ethoc_open(struct net_device *dev)
if (ret) if (ret)
return ret; return ret;
napi_enable(&priv->napi);
ethoc_init_ring(priv, dev->mem_start); ethoc_init_ring(priv, dev->mem_start);
ethoc_reset(priv); ethoc_reset(priv);
@ -722,7 +724,6 @@ static int ethoc_open(struct net_device *dev)
} }
phy_start(dev->phydev); phy_start(dev->phydev);
napi_enable(&priv->napi);
if (netif_msg_ifup(priv)) { if (netif_msg_ifup(priv)) {
dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",


@ -1953,7 +1953,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
0, 1, 0, 1,
(last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
0, 0); 0, 0);
tmp_len -= TSO_MAX_BUFF_SIZE; tmp_len -= TSO_MAX_BUFF_SIZE;


@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2];
static int vxlan_sock_add(struct vxlan_dev *vxlan); static int vxlan_sock_add(struct vxlan_dev *vxlan);
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
/* per-network namespace private data for this module */ /* per-network namespace private data for this module */
struct vxlan_net { struct vxlan_net {
struct list_head vxlan_list; struct list_head vxlan_list;
@ -717,6 +719,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
call_rcu(&f->rcu, vxlan_fdb_free); call_rcu(&f->rcu, vxlan_fdb_free);
} }
static void vxlan_dst_free(struct rcu_head *head)
{
struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
dst_cache_destroy(&rd->dst_cache);
kfree(rd);
}
static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
struct vxlan_rdst *rd)
{
list_del_rcu(&rd->list);
vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
call_rcu(&rd->rcu, vxlan_dst_free);
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
union vxlan_addr *ip, __be16 *port, __be32 *vni, union vxlan_addr *ip, __be16 *port, __be32 *vni,
u32 *ifindex) u32 *ifindex)
@ -847,9 +865,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
* otherwise destroy the fdb entry * otherwise destroy the fdb entry
*/ */
if (rd && !list_is_singular(&f->remotes)) { if (rd && !list_is_singular(&f->remotes)) {
list_del_rcu(&rd->list); vxlan_fdb_dst_destroy(vxlan, f, rd);
vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
kfree_rcu(rd, rcu);
goto out; goto out;
} }
@ -1026,6 +1042,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan)
rcu_assign_pointer(vxlan->vn4_sock, NULL); rcu_assign_pointer(vxlan->vn4_sock, NULL);
synchronize_net(); synchronize_net();
vxlan_vs_del_dev(vxlan);
if (__vxlan_sock_release_prep(sock4)) { if (__vxlan_sock_release_prep(sock4)) {
udp_tunnel_sock_release(sock4->sock); udp_tunnel_sock_release(sock4->sock);
kfree(sock4); kfree(sock4);
@ -2286,6 +2304,15 @@ static void vxlan_cleanup(unsigned long arg)
mod_timer(&vxlan->age_timer, next_timer); mod_timer(&vxlan->age_timer, next_timer);
} }
static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
spin_lock(&vn->sock_lock);
hlist_del_init_rcu(&vxlan->hlist);
spin_unlock(&vn->sock_lock);
}
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{ {
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@ -3056,12 +3083,6 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
static void vxlan_dellink(struct net_device *dev, struct list_head *head) static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{ {
struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
spin_lock(&vn->sock_lock);
if (!hlist_unhashed(&vxlan->hlist))
hlist_del_rcu(&vxlan->hlist);
spin_unlock(&vn->sock_lock);
gro_cells_destroy(&vxlan->gro_cells); gro_cells_destroy(&vxlan->gro_cells);
list_del(&vxlan->next); list_del(&vxlan->next);


@ -721,6 +721,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
return -EIO; return -EIO;
} }
memset(&elreq, 0, sizeof(elreq));
elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev, elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
DMA_TO_DEVICE); DMA_TO_DEVICE);
@ -786,10 +788,9 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (atomic_read(&vha->loop_state) == LOOP_READY && if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F || (ha->current_topology == ISP_CFG_F ||
((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) && (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) && elreq.options == EXTERNAL_LOOPBACK) {
elreq.options == EXTERNAL_LOOPBACK) {
type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
ql_dbg(ql_dbg_user, vha, 0x701e, ql_dbg(ql_dbg_user, vha, 0x701e,
"BSG request type: %s.\n", type); "BSG request type: %s.\n", type);


@ -1131,7 +1131,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Mailbox registers. */ /* Mailbox registers. */
mbx_reg = &reg->mailbox0; mbx_reg = &reg->mailbox0;
for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */ /* Transfer sequence registers. */
@ -2090,7 +2090,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
/* Mailbox registers. */ /* Mailbox registers. */
mbx_reg = &reg->mailbox0; mbx_reg = &reg->mailbox0;
for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
/* Transfer sequence registers. */ /* Transfer sequence registers. */


@ -4783,9 +4783,9 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
memset(mcp->mb, 0 , sizeof(mcp->mb)); memset(mcp->mb, 0 , sizeof(mcp->mb));
mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
mcp->mb[1] = mreq->options | BIT_6; /* BIT_6 specifies 64bit address */ /* BIT_6 specifies 64bit address */
mcp->mb[1] = mreq->options | BIT_15 | BIT_6;
if (IS_CNA_CAPABLE(ha)) { if (IS_CNA_CAPABLE(ha)) {
mcp->mb[1] |= BIT_15;
mcp->mb[2] = vha->fcoe_fcf_idx; mcp->mb[2] = vha->fcoe_fcf_idx;
} }
mcp->mb[16] = LSW(mreq->rcv_dma); mcp->mb[16] = LSW(mreq->rcv_dma);


@ -2420,10 +2420,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (mem_only) { if (mem_only) {
if (pci_enable_device_mem(pdev)) if (pci_enable_device_mem(pdev))
goto probe_out; return ret;
} else { } else {
if (pci_enable_device(pdev)) if (pci_enable_device(pdev))
goto probe_out; return ret;
} }
/* This may fail but that's ok */ /* This may fail but that's ok */
@ -2433,7 +2433,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (!ha) { if (!ha) {
ql_log_pci(ql_log_fatal, pdev, 0x0009, ql_log_pci(ql_log_fatal, pdev, 0x0009,
"Unable to allocate memory for ha.\n"); "Unable to allocate memory for ha.\n");
goto probe_out; goto disable_device;
} }
ql_dbg_pci(ql_dbg_init, pdev, 0x000a, ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
"Memory allocated for ha=%p.\n", ha); "Memory allocated for ha=%p.\n", ha);
@ -3039,7 +3039,7 @@ iospace_config_failed:
kfree(ha); kfree(ha);
ha = NULL; ha = NULL;
probe_out: disable_device:
pci_disable_device(pdev); pci_disable_device(pdev);
return ret; return ret;
} }


@ -371,7 +371,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
goto done; goto done;
} }
if (end <= start || start == 0 || end == 0) { if (end < start || start == 0 || end == 0) {
ql_dbg(ql_dbg_misc, vha, 0xd023, ql_dbg(ql_dbg_misc, vha, 0xd023,
"%s: unusable range (start=%x end=%x)\n", __func__, "%s: unusable range (start=%x end=%x)\n", __func__,
ent->t262.end_addr, ent->t262.start_addr); ent->t262.end_addr, ent->t262.start_addr);


@ -387,18 +387,10 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
struct lov_mds_md *lmmk = NULL; struct lov_mds_md *lmmk = NULL;
int rc, lmmk_size, lmm_size; int rc, lmmk_size, lmm_size;
int lum_size; int lum_size;
mm_segment_t seg;
if (!lsm) if (!lsm)
return -ENODATA; return -ENODATA;
/*
* "Switch to kernel segment" to allow copying from kernel space by
* copy_{to,from}_user().
*/
seg = get_fs();
set_fs(KERNEL_DS);
/* we only need the header part from user space to get lmm_magic and /* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3) * lmm_stripe_count, (the header part is common to v1 and v3)
*/ */
@ -478,6 +470,5 @@ int lov_getstripe(struct lov_object *obj, struct lov_stripe_md *lsm,
out_free: out_free:
kfree(lmmk); kfree(lmmk);
out: out:
set_fs(seg);
return rc; return rc;
} }


@ -1182,15 +1182,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
if (cmd->unknown_data_length) { if (cmd->unknown_data_length) {
cmd->data_length = size; cmd->data_length = size;
} else if (size != cmd->data_length) { } else if (size != cmd->data_length) {
pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
" %u does not match SCSI CDB Length: %u for SAM Opcode:" " %u does not match SCSI CDB Length: %u for SAM Opcode:"
" 0x%02x\n", cmd->se_tfo->get_fabric_name(), " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cmd->t_task_cdb[0]); cmd->data_length, size, cmd->t_task_cdb[0]);
if (cmd->data_direction == DMA_TO_DEVICE && if (cmd->data_direction == DMA_TO_DEVICE) {
cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
pr_err("Rejecting underflow/overflow WRITE data\n"); pr_err_ratelimited("Rejecting underflow/overflow"
return TCM_INVALID_CDB_FIELD; " for WRITE data CDB\n");
return TCM_INVALID_CDB_FIELD;
}
/*
* Some fabric drivers like iscsi-target still expect to
* always reject overflow writes. Reject this case until
* full fabric driver level support for overflow writes
* is introduced tree-wide.
*/
if (size > cmd->data_length) {
pr_err_ratelimited("Rejecting overflow for"
" WRITE control CDB\n");
return TCM_INVALID_CDB_FIELD;
}
} }
/* /*
* Reject READ_* or WRITE_* with overflow/underflow for * Reject READ_* or WRITE_* with overflow/underflow for


@ -1381,9 +1381,9 @@ static struct spi_driver ifx_spi_driver = {
static void __exit ifx_spi_exit(void) static void __exit ifx_spi_exit(void)
{ {
/* unregister */ /* unregister */
spi_unregister_driver(&ifx_spi_driver);
tty_unregister_driver(tty_drv); tty_unregister_driver(tty_drv);
put_tty_driver(tty_drv); put_tty_driver(tty_drv);
spi_unregister_driver(&ifx_spi_driver);
unregister_reboot_notifier(&ifx_modem_reboot_notifier_block); unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
} }


@ -1976,12 +1976,14 @@ static int sci_startup(struct uart_port *port)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
ret = sci_request_irq(s);
if (unlikely(ret < 0))
return ret;
sci_request_dma(port); sci_request_dma(port);
ret = sci_request_irq(s);
if (unlikely(ret < 0)) {
sci_free_dma(port);
return ret;
}
return 0; return 0;
} }
@ -2012,8 +2014,8 @@ static void sci_shutdown(struct uart_port *port)
} }
#endif #endif
sci_free_dma(port);
sci_free_irq(s); sci_free_irq(s);
sci_free_dma(port);
} }
static int sci_sck_calc(struct sci_port *s, unsigned int bps, static int sci_sck_calc(struct sci_port *s, unsigned int bps,


@ -294,7 +294,8 @@ static int ci_role_show(struct seq_file *s, void *data)
{ {
struct ci_hdrc *ci = s->private; struct ci_hdrc *ci = s->private;
seq_printf(s, "%s\n", ci_role(ci)->name); if (ci->role != CI_ROLE_END)
seq_printf(s, "%s\n", ci_role(ci)->name);
return 0; return 0;
} }


@ -1987,6 +1987,7 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
int ci_hdrc_gadget_init(struct ci_hdrc *ci) int ci_hdrc_gadget_init(struct ci_hdrc *ci)
{ {
struct ci_role_driver *rdrv; struct ci_role_driver *rdrv;
int ret;
if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC)) if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
return -ENXIO; return -ENXIO;
@ -1999,7 +2000,10 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
rdrv->stop = udc_id_switch_for_host; rdrv->stop = udc_id_switch_for_host;
rdrv->irq = udc_irq; rdrv->irq = udc_irq;
rdrv->name = "gadget"; rdrv->name = "gadget";
ci->roles[CI_ROLE_GADGET] = rdrv;
return udc_start(ci); ret = udc_start(ci);
if (!ret)
ci->roles[CI_ROLE_GADGET] = rdrv;
return ret;
} }

View file

@ -395,7 +395,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
/* Caller must hold fsg->lock */ /* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common) static void wakeup_thread(struct fsg_common *common)
{ {
smp_wmb(); /* ensure the write of bh->state is complete */ /*
* Ensure the reading of thread_wakeup_needed
* and the writing of bh->state are completed
*/
smp_mb();
/* Tell the main thread that something has happened */ /* Tell the main thread that something has happened */
common->thread_wakeup_needed = 1; common->thread_wakeup_needed = 1;
if (common->thread_task) if (common->thread_task)
@ -626,7 +630,12 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze)
} }
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);
common->thread_wakeup_needed = 0; common->thread_wakeup_needed = 0;
smp_rmb(); /* ensure the latest bh->state is visible */
/*
* Ensure the writing of thread_wakeup_needed
* and the reading of bh->state are completed
*/
smp_mb();
return rc; return rc;
} }


@ -335,8 +335,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
st->global_error = 1; st->global_error = 1;
} }
} }
st->va += PAGE_SIZE * nr; st->va += XEN_PAGE_SIZE * nr;
st->index += nr; st->index += nr / XEN_PFN_PER_PAGE;
return 0; return 0;
} }


@ -713,7 +713,7 @@ struct block_device *bdget(dev_t dev)
bdev->bd_contains = NULL; bdev->bd_contains = NULL;
bdev->bd_super = NULL; bdev->bd_super = NULL;
bdev->bd_inode = inode; bdev->bd_inode = inode;
bdev->bd_block_size = (1 << inode->i_blkbits); bdev->bd_block_size = i_blocksize(inode);
bdev->bd_part_count = 0; bdev->bd_part_count = 0;
bdev->bd_invalidated = 0; bdev->bd_invalidated = 0;
inode->i_mode = S_IFBLK; inode->i_mode = S_IFBLK;
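(Editorial note, not part of the patch: this and several later hunks replace open-coded "1 << inode->i_blkbits" with i_blocksize(). For reference, the helper added to include/linux/fs.h for this series is, as far as I recall, essentially the one-liner below; treat the exact form as an assumption:)
	static inline unsigned int i_blocksize(const struct inode *node)
	{
		return (1 << node->i_blkbits);
	}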


@ -3984,6 +3984,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
info->space_info_kobj, "%s", info->space_info_kobj, "%s",
alloc_name(found->flags)); alloc_name(found->flags));
if (ret) { if (ret) {
percpu_counter_destroy(&found->total_bytes_pinned);
kfree(found); kfree(found);
return ret; return ret;
} }


@ -2842,7 +2842,7 @@ static long btrfs_fallocate(struct file *file, int mode,
if (!ret) if (!ret)
ret = btrfs_prealloc_file_range(inode, mode, ret = btrfs_prealloc_file_range(inode, mode,
range->start, range->start,
range->len, 1 << inode->i_blkbits, range->len, i_blocksize(inode),
offset + len, &alloc_hint); offset + len, &alloc_hint);
else else
btrfs_free_reserved_data_space(inode, range->start, btrfs_free_reserved_data_space(inode, range->start,


@ -7435,8 +7435,8 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
int found = false; int found = false;
void **pagep = NULL; void **pagep = NULL;
struct page *page = NULL; struct page *page = NULL;
int start_idx; unsigned long start_idx;
int end_idx; unsigned long end_idx;
start_idx = start >> PAGE_SHIFT; start_idx = start >> PAGE_SHIFT;

View file

@ -2353,7 +2353,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping,
loff_t pos, loff_t *bytes) loff_t pos, loff_t *bytes)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
unsigned blocksize = 1 << inode->i_blkbits; unsigned int blocksize = i_blocksize(inode);
struct page *page; struct page *page;
void *fsdata; void *fsdata;
pgoff_t index, curidx; pgoff_t index, curidx;
@ -2433,8 +2433,8 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
get_block_t *get_block, loff_t *bytes) get_block_t *get_block, loff_t *bytes)
{ {
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
unsigned blocksize = 1 << inode->i_blkbits; unsigned int blocksize = i_blocksize(inode);
unsigned zerofrom; unsigned int zerofrom;
int err; int err;
err = cont_expand_zero(file, mapping, pos, bytes); err = cont_expand_zero(file, mapping, pos, bytes);
@ -2796,7 +2796,7 @@ int nobh_truncate_page(struct address_space *mapping,
struct buffer_head map_bh; struct buffer_head map_bh;
int err; int err;
blocksize = 1 << inode->i_blkbits; blocksize = i_blocksize(inode);
length = offset & (blocksize - 1); length = offset & (blocksize - 1);
/* Block boundary? Nothing to do */ /* Block boundary? Nothing to do */
@ -2874,7 +2874,7 @@ int block_truncate_page(struct address_space *mapping,
struct buffer_head *bh; struct buffer_head *bh;
int err; int err;
blocksize = 1 << inode->i_blkbits; blocksize = i_blocksize(inode);
length = offset & (blocksize - 1); length = offset & (blocksize - 1);
/* Block boundary? Nothing to do */ /* Block boundary? Nothing to do */
@ -2986,7 +2986,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block,
struct inode *inode = mapping->host; struct inode *inode = mapping->host;
tmp.b_state = 0; tmp.b_state = 0;
tmp.b_blocknr = 0; tmp.b_blocknr = 0;
tmp.b_size = 1 << inode->i_blkbits; tmp.b_size = i_blocksize(inode);
get_block(inode, block, &tmp, 0); get_block(inode, block, &tmp, 0);
return tmp.b_blocknr; return tmp.b_blocknr;
} }


@ -745,7 +745,7 @@ static int ceph_writepages_start(struct address_space *mapping,
struct pagevec pvec; struct pagevec pvec;
int done = 0; int done = 0;
int rc = 0; int rc = 0;
unsigned wsize = 1 << inode->i_blkbits; unsigned int wsize = i_blocksize(inode);
struct ceph_osd_request *req = NULL; struct ceph_osd_request *req = NULL;
int do_sync = 0; int do_sync = 0;
loff_t snap_size, i_size; loff_t snap_size, i_size;


@ -587,7 +587,7 @@ static int dio_set_defer_completion(struct dio *dio)
/* /*
* Call into the fs to map some more disk blocks. We record the current number * Call into the fs to map some more disk blocks. We record the current number
* of available blocks at sdio->blocks_available. These are in units of the * of available blocks at sdio->blocks_available. These are in units of the
* fs blocksize, (1 << inode->i_blkbits). * fs blocksize, i_blocksize(inode).
* *
* The fs is allowed to map lots of blocks at once. If it wants to do that, * The fs is allowed to map lots of blocks at once. If it wants to do that,
* it uses the passed inode-relative block number as the file offset, as usual. * it uses the passed inode-relative block number as the file offset, as usual.


@ -3413,13 +3413,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
struct ext4_sb_info *sbi; struct ext4_sb_info *sbi;
struct ext4_extent_header *eh; struct ext4_extent_header *eh;
struct ext4_map_blocks split_map; struct ext4_map_blocks split_map;
struct ext4_extent zero_ex; struct ext4_extent zero_ex1, zero_ex2;
struct ext4_extent *ex, *abut_ex; struct ext4_extent *ex, *abut_ex;
ext4_lblk_t ee_block, eof_block; ext4_lblk_t ee_block, eof_block;
unsigned int ee_len, depth, map_len = map->m_len; unsigned int ee_len, depth, map_len = map->m_len;
int allocated = 0, max_zeroout = 0; int allocated = 0, max_zeroout = 0;
int err = 0; int err = 0;
int split_flag = 0; int split_flag = EXT4_EXT_DATA_VALID2;
ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
"block %llu, max_blocks %u\n", inode->i_ino, "block %llu, max_blocks %u\n", inode->i_ino,
@ -3436,7 +3436,8 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
ex = path[depth].p_ext; ex = path[depth].p_ext;
ee_block = le32_to_cpu(ex->ee_block); ee_block = le32_to_cpu(ex->ee_block);
ee_len = ext4_ext_get_actual_len(ex); ee_len = ext4_ext_get_actual_len(ex);
zero_ex.ee_len = 0; zero_ex1.ee_len = 0;
zero_ex2.ee_len = 0;
trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
@@ -3576,62 +3577,52 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
     if (ext4_encrypted_inode(inode))
         max_zeroout = 0;
-    /* If extent is less than s_max_zeroout_kb, zeroout directly */
-    if (max_zeroout && (ee_len <= max_zeroout)) {
-        err = ext4_ext_zeroout(inode, ex);
-        if (err)
-            goto out;
-        zero_ex.ee_block = ex->ee_block;
-        zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
-        ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
-        err = ext4_ext_get_access(handle, inode, path + depth);
-        if (err)
-            goto out;
-        ext4_ext_mark_initialized(ex);
-        ext4_ext_try_to_merge(handle, inode, path, ex);
-        err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-        goto out;
-    }
     /*
-     * four cases:
+     * five cases:
      * 1. split the extent into three extents.
-     * 2. split the extent into two extents, zeroout the first half.
-     * 3. split the extent into two extents, zeroout the second half.
+     * 2. split the extent into two extents, zeroout the head of the first
+     *    extent.
+     * 3. split the extent into two extents, zeroout the tail of the second
+     *    extent.
      * 4. split the extent into two extents with out zeroout.
+     * 5. no splitting needed, just possibly zeroout the head and / or the
+     *    tail of the extent.
      */
     split_map.m_lblk = map->m_lblk;
     split_map.m_len = map->m_len;
-    if (max_zeroout && (allocated > map->m_len)) {
+    if (max_zeroout && (allocated > split_map.m_len)) {
         if (allocated <= max_zeroout) {
-            /* case 3 */
-            zero_ex.ee_block =
-                     cpu_to_le32(map->m_lblk);
-            zero_ex.ee_len = cpu_to_le16(allocated);
-            ext4_ext_store_pblock(&zero_ex,
-                ext4_ext_pblock(ex) + map->m_lblk - ee_block);
-            err = ext4_ext_zeroout(inode, &zero_ex);
+            /* case 3 or 5 */
+            zero_ex1.ee_block =
                 cpu_to_le32(split_map.m_lblk +
                         split_map.m_len);
+            zero_ex1.ee_len =
+                cpu_to_le16(allocated - split_map.m_len);
+            ext4_ext_store_pblock(&zero_ex1,
+                ext4_ext_pblock(ex) + split_map.m_lblk +
+                split_map.m_len - ee_block);
+            err = ext4_ext_zeroout(inode, &zero_ex1);
             if (err)
                 goto out;
-            split_map.m_lblk = map->m_lblk;
             split_map.m_len = allocated;
-        } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
-            /* case 2 */
-            if (map->m_lblk != ee_block) {
-                zero_ex.ee_block = ex->ee_block;
-                zero_ex.ee_len = cpu_to_le16(map->m_lblk -
+        }
+        if (split_map.m_lblk - ee_block + split_map.m_len <
+                                max_zeroout) {
+            /* case 2 or 5 */
+            if (split_map.m_lblk != ee_block) {
+                zero_ex2.ee_block = ex->ee_block;
+                zero_ex2.ee_len = cpu_to_le16(split_map.m_lblk -
                             ee_block);
-                ext4_ext_store_pblock(&zero_ex,
+                ext4_ext_store_pblock(&zero_ex2,
                               ext4_ext_pblock(ex));
-                err = ext4_ext_zeroout(inode, &zero_ex);
+                err = ext4_ext_zeroout(inode, &zero_ex2);
                 if (err)
                     goto out;
             }
+            split_map.m_len += split_map.m_lblk - ee_block;
             split_map.m_lblk = ee_block;
-            split_map.m_len = map->m_lblk - ee_block + map->m_len;
             allocated = map->m_len;
         }
     }
@@ -3642,8 +3633,11 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
     err = 0;
 out:
     /* If we have gotten a failure, don't zero out status tree */
-    if (!err)
-        err = ext4_zeroout_es(inode, &zero_ex);
+    if (!err) {
+        err = ext4_zeroout_es(inode, &zero_ex1);
+        if (!err)
+            err = ext4_zeroout_es(inode, &zero_ex2);
+    }
     return err ? err : allocated;
 }
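
A note on the rework above, not part of the patch itself: with zero_ex1 and zero_ex2 the function can now zero out up to two ranges around the written part of an unwritten extent, zero_ex1 covering the tail past the requested range and zero_ex2 covering the head before it. A minimal userspace sketch of just that arithmetic (invented names, plain C, not the kernel code):

    #include <stdio.h>

    struct range { unsigned int start, len; };

    static void split_zeroout(unsigned int ee_block, unsigned int ee_len,
                              unsigned int m_lblk, unsigned int m_len,
                              struct range *tail, struct range *head)
    {
            /* blocks from the start of the write to the end of the extent */
            unsigned int allocated = ee_len - (m_lblk - ee_block);

            /* case 3 or 5: zero the blocks after the written range (zero_ex1) */
            tail->start = m_lblk + m_len;
            tail->len = allocated - m_len;

            /* case 2 or 5: zero the blocks before the written range (zero_ex2) */
            head->start = ee_block;
            head->len = m_lblk - ee_block;
    }

    int main(void)
    {
            struct range tail, head;

            /* unwritten extent covers blocks 100..131, write touches 110..113 */
            split_zeroout(100, 32, 110, 4, &tail, &head);
            printf("zero head: %u+%u, zero tail: %u+%u\n",
                   head.start, head.len, tail.start, tail.len);
            return 0;
    }

For an extent covering blocks 100..131 and a write touching 110..113, the sketch reports a head of 100+10 and a tail of 114+18, matching cases 2 and 3 in the comment above.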
@@ -4893,6 +4887,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
     /* Zero out partial block at the edges of the range */
     ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+    if (ret >= 0)
+        ext4_update_inode_fsync_trans(handle, inode, 1);
     if (file->f_flags & O_SYNC)
         ext4_handle_sync(handle);
@@ -5579,6 +5575,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
         ext4_handle_sync(handle);
     inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
     ext4_mark_inode_dirty(handle, inode);
+    ext4_update_inode_fsync_trans(handle, inode, 1);
 out_stop:
     ext4_journal_stop(handle);
@@ -5752,6 +5749,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
     up_write(&EXT4_I(inode)->i_data_sem);
     if (IS_SYNC(inode))
         ext4_handle_sync(handle);
+    if (ret >= 0)
+        ext4_update_inode_fsync_trans(handle, inode, 1);
 out_stop:
     ext4_journal_stop(handle);
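
The ext4_update_inode_fsync_trans() calls added to the fallocate-style operations above record, roughly speaking, which running journal transaction last touched the inode's data, so that a later fdatasync() knows it still has something to commit. A deliberately simplified userspace model of that idea (hypothetical names, not the ext4/JBD2 implementation):

    #include <stdio.h>

    /* Toy journal: hands out monotonically increasing transaction ids. */
    static unsigned long running_tid = 1;    /* transaction currently open      */
    static unsigned long committed_tid;      /* last transaction safely on disk */

    struct toy_inode {
            unsigned long datasync_tid;      /* last tid that changed file data */
    };

    /* Model of the added calls: remember the running tid after a data change. */
    static void update_fsync_trans(struct toy_inode *inode)
    {
            inode->datasync_tid = running_tid;
    }

    /* fdatasync() only has to wait until the recorded tid is committed. */
    static void toy_fdatasync(struct toy_inode *inode)
    {
            if (committed_tid < inode->datasync_tid) {
                    committed_tid = running_tid;   /* force a commit */
                    running_tid++;
                    printf("committed tid %lu\n", committed_tid);
            } else {
                    printf("nothing to commit\n");
            }
    }

    int main(void)
    {
            struct toy_inode inode = { 0 };

            update_fsync_trans(&inode);   /* e.g. after a hole punch */
            toy_fdatasync(&inode);        /* commits tid 1 */
            toy_fdatasync(&inode);        /* nothing new to commit */
            return 0;
    }

The second toy_fdatasync() finds nothing newer than the already committed tid and returns without forcing another commit, which is the behaviour the recorded tid makes possible.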

@@ -432,47 +432,27 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
         num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
         nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
                       (pgoff_t)num);
-        if (nr_pages == 0) {
-            if (whence == SEEK_DATA)
-                break;
-            BUG_ON(whence != SEEK_HOLE);
-            /*
-             * If this is the first time to go into the loop and
-             * offset is not beyond the end offset, it will be a
-             * hole at this offset
-             */
-            if (lastoff == startoff || lastoff < endoff)
-                found = 1;
+        if (nr_pages == 0)
             break;
-        }
-        /*
-         * If this is the first time to go into the loop and
-         * offset is smaller than the first page offset, it will be a
-         * hole at this offset.
-         */
-        if (lastoff == startoff && whence == SEEK_HOLE &&
-            lastoff < page_offset(pvec.pages[0])) {
-            found = 1;
-            break;
-        }
         for (i = 0; i < nr_pages; i++) {
             struct page *page = pvec.pages[i];
             struct buffer_head *bh, *head;
             /*
-             * If the current offset is not beyond the end of given
-             * range, it will be a hole.
+             * If current offset is smaller than the page offset,
+             * there is a hole at this offset.
              */
-            if (lastoff < endoff && whence == SEEK_HOLE &&
-                page->index > end) {
+            if (whence == SEEK_HOLE && lastoff < endoff &&
+                lastoff < page_offset(pvec.pages[i])) {
                 found = 1;
                 *offset = lastoff;
                 goto out;
             }
+            if (page->index > end)
+                goto out;
             lock_page(page);
             if (unlikely(page->mapping != inode->i_mapping)) {
@@ -512,20 +492,18 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
             unlock_page(page);
         }
-        /*
-         * The no. of pages is less than our desired, that would be a
-         * hole in there.
-         */
-        if (nr_pages < num && whence == SEEK_HOLE) {
-            found = 1;
-            *offset = lastoff;
+        /* The no. of pages is less than our desired, we are done. */
+        if (nr_pages < num)
             break;
-        }
         index = pvec.pages[i - 1]->index + 1;
         pagevec_release(&pvec);
     } while (index <= end);
+    if (whence == SEEK_HOLE && lastoff < endoff) {
+        found = 1;
+        *offset = lastoff;
+    }
 out:
     pagevec_release(&pvec);
     return found;
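
To see what the restructured loop above computes, here is a simplified userspace sketch of the SEEK_HOLE scan: any gap before the next cached page, or after the last cached page but before the end offset, is reported as a hole. Names are hypothetical, each cached page is treated as fully written, and the real code additionally walks the buffer heads inside each page:

    #include <stdio.h>

    #define TOY_PAGE_SIZE 4096UL

    /*
     * pages[]: sorted byte offsets of pages present in a toy "page cache".
     * Returns the first hole at or after start, or end if the whole range
     * is backed by cached pages.
     */
    static unsigned long seek_hole(const unsigned long *pages, int nr_pages,
                                   unsigned long start, unsigned long end)
    {
            unsigned long lastoff = start;
            int i;

            for (i = 0; i < nr_pages; i++) {
                    /* Gap before this page? Then there is a hole at lastoff. */
                    if (lastoff < end && lastoff < pages[i])
                            return lastoff;
                    lastoff = pages[i] + TOY_PAGE_SIZE;
            }

            /* No more cached pages up to end: the hole starts at lastoff. */
            if (lastoff < end)
                    return lastoff;
            return end;
    }

    int main(void)
    {
            unsigned long cached[] = { 0, 4096, 16384 };   /* pages at 0, 4k, 16k */

            printf("hole at %lu\n", seek_hole(cached, 3, 0, 32768));
            return 0;
    }

With pages cached at 0 and 4096 the sketch reports a hole at 8192, the first byte not backed by a cached page.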

@@ -2218,7 +2218,7 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
 {
     struct inode *inode = mpd->inode;
     int err;
-    ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
+    ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
                             >> inode->i_blkbits;
     do {
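
This hunk, and several later ones in the series, replace the open-coded 1 << inode->i_blkbits with the i_blocksize() helper; the behaviour is unchanged, only the intent is clearer. A small userspace stand-in showing the equivalence (toy struct, not the kernel definition, which lives in include/linux/fs.h):

    #include <stdio.h>

    /* Toy stand-in for struct inode; only the field the helper needs. */
    struct toy_inode {
            unsigned char i_blkbits;   /* log2 of the filesystem block size */
    };

    /* Same shape as the kernel helper: block size derived from i_blkbits. */
    static inline unsigned int toy_i_blocksize(const struct toy_inode *inode)
    {
            return 1U << inode->i_blkbits;
    }

    int main(void)
    {
            struct toy_inode inode = { .i_blkbits = 12 };   /* 4096-byte blocks */

            printf("blocksize = %u\n", toy_i_blocksize(&inode));
            return 0;
    }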
@@ -3478,14 +3478,14 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
         * writes need zeroing either because they can race with page
         * faults or because they use partial blocks.
         */
-        if (round_down(offset, 1<<inode->i_blkbits) >= inode->i_size &&
+        if (round_down(offset, i_blocksize(inode)) >= inode->i_size &&
             ext4_aligned_io(inode, offset, count))
             get_block_func = ext4_dio_get_block;
         else
             get_block_func = ext4_dax_get_block;
         dio_flags = DIO_LOCKING;
     } else if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
-           round_down(offset, 1 << inode->i_blkbits) >= inode->i_size) {
+           round_down(offset, i_blocksize(inode)) >= inode->i_size) {
         get_block_func = ext4_dio_get_block;
         dio_flags = DIO_LOCKING | DIO_SKIP_HOLES;
     } else if (is_sync_kiocb(iocb)) {
@@ -4099,6 +4099,8 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
     inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
     ext4_mark_inode_dirty(handle, inode);
+    if (ret >= 0)
+        ext4_update_inode_fsync_trans(handle, inode, 1);
 out_stop:
     ext4_journal_stop(handle);
 out_dio:
@@ -5101,7 +5103,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
     * do. We do the check mainly to optimize the common PAGE_SIZE ==
     * blocksize case
     */
-    if (offset > PAGE_SIZE - (1 << inode->i_blkbits))
+    if (offset > PAGE_SIZE - i_blocksize(inode))
         return;
     while (1) {
         page = find_lock_page(inode->i_mapping,
@@ -5496,8 +5498,9 @@ static int ext4_expand_extra_isize(struct inode *inode,
     /* No extended attributes present */
     if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
         header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
-        memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
-            new_extra_isize);
+        memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+               EXT4_I(inode)->i_extra_isize, 0,
+               new_extra_isize - EXT4_I(inode)->i_extra_isize);
         EXT4_I(inode)->i_extra_isize = new_extra_isize;
         return 0;
     }
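
The memset change above clears only the bytes gained when the in-inode extra area grows from the current i_extra_isize to new_extra_isize, instead of wiping the whole area and losing fields already stored there. A small sketch of the before/after bounds with made-up sizes (not kernel code):

    #include <stdio.h>
    #include <string.h>

    #define GOOD_OLD_INODE_SIZE 128   /* fixed part of the on-disk inode */

    int main(void)
    {
            unsigned char raw_inode[256];
            unsigned int i_extra_isize = 32;    /* bytes already in use past 128  */
            unsigned int new_extra_isize = 64;  /* desired size of the extra area */

            memset(raw_inode, 0xAA, sizeof(raw_inode));   /* pretend it holds data */

            /* Old (buggy) bounds would have been:
             *   memset(raw_inode + GOOD_OLD_INODE_SIZE, 0, new_extra_isize);
             * which also zeroes the 32 bytes already holding extra fields.
             */

            /* Fixed bounds: clear only the newly grown region. */
            memset(raw_inode + GOOD_OLD_INODE_SIZE + i_extra_isize, 0,
                   new_extra_isize - i_extra_isize);

            printf("byte 128+0  = 0x%02x (preserved)\n", raw_inode[128]);
            printf("byte 128+32 = 0x%02x (cleared)\n", raw_inode[160]);
            return 0;
    }

Byte 128 (inside the 32 bytes already in use) keeps its contents, while byte 160 (the first newly added byte) is zeroed.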

@@ -838,7 +838,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
     inode = page->mapping->host;
     sb = inode->i_sb;
     ngroups = ext4_get_groups_count(sb);
-    blocksize = 1 << inode->i_blkbits;
+    blocksize = i_blocksize(inode);
     blocks_per_page = PAGE_SIZE / blocksize;
     groups_per_page = blocks_per_page >> 1;

@@ -187,7 +187,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to)
     if (PageUptodate(page))
         return 0;
-    blocksize = 1 << inode->i_blkbits;
+    blocksize = i_blocksize(inode);
     if (!page_has_buffers(page))
         create_empty_buffers(page, blocksize, 0);

@@ -419,8 +419,8 @@ int
 iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
         struct iomap_ops *ops)
 {
-    unsigned blocksize = (1 << inode->i_blkbits);
-    unsigned off = pos & (blocksize - 1);
+    unsigned int blocksize = i_blocksize(inode);
+    unsigned int off = pos & (blocksize - 1);
     /* Block boundary? Nothing to do */
     if (!off)

Some files were not shown because too many files have changed in this diff.