This is the 4.9.188 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAl1JqvYACgkQONu9yGCS
 aT78HA//TVAfvVC0NqHbLFIIgkkZ1ygQc9A7K0hhkdCvZXcwSQgaxWO3uhHOeVNz
 yK2RS5GqxsNQaSnsmO66cfb6+B1YzmnRuXbZsKU9AcA+rs5rhRGZx1Ax+nMxbpeC
 zQ6Yi9aATa+R5O/yt3emtxL/Ml7W82sb0kE/o2YGJBDamiOmWEAnf6ROTz9euzZ3
 bm2ifHNDiN5EG49J6PgQLXGNca9YqxFmpH6bts+mdNIBmv1M46QyD4nMtke3q4fz
 XDLy9RoPXvbUt9i+xRf18Z5lHMSMqdS9+rU7iOK5E3mir9slWqTNLHN6B7dObvH3
 lmICvdYUo+CucVHMHlihUNtPWpw4q1Ml5f8gY9p9qNE7WNErbt8Do9hCi7RKEpwL
 o1jIVoJcWdeElbTsMMOnqnypOX54JsE4NrValuuuRuC8LqqsFe6AxbOoqouQHzqW
 TOuJFw6thJlYXhsZCNciNLSlyD7PbobeBuZ+6DSfeGhqmGR0/5+Aq9bbZmxsLd0p
 OMsnwkiboYBhTn/IaC0jpIVwqcI+NvKQG0fwlAR0KqILwLrvbe62WMQS7OxoKxix
 4fj5B1tZMHKWKMdE0GPFE0f2bfVMJMMnF+ra8ilgLLI1cuOPGwjXHpT0Nio9Uqtb
 UQ2Q9R8MpxUCYkOLy/22U0cNrc29ND9dzp9mkaGpDH0PJUwVg2s=
 =i+NY
 -----END PGP SIGNATURE-----

Merge 4.9.188 into android-4.9-q

Changes in 4.9.188
	ARM: riscpc: fix DMA
	ARM: dts: rockchip: Make rk3288-veyron-minnie run at hs200
	ARM: dts: rockchip: Make rk3288-veyron-mickey's emmc work again
	ARM: dts: rockchip: Mark that the rk3288 timer might stop in suspend
	ftrace: Enable trampoline when rec count returns back to one
	kernel/module.c: Only return -EEXIST for modules that have finished loading
	MIPS: lantiq: Fix bitfield masking
	dmaengine: rcar-dmac: Reject zero-length slave DMA requests
	fs/adfs: super: fix use-after-free bug
	btrfs: fix minimum number of chunk errors for DUP
	ceph: fix improper use of smp_mb__before_atomic()
	ceph: return -ERANGE if virtual xattr value didn't fit in buffer
	scsi: zfcp: fix GCC compiler warning emitted with -Wmaybe-uninitialized
	ACPI: fix false-positive -Wuninitialized warning
	be2net: Signal that the device cannot transmit during reconfiguration
	x86/apic: Silence -Wtype-limits compiler warnings
	x86: math-emu: Hide clang warnings for 16-bit overflow
	mm/cma.c: fail if fixed declaration can't be honored
	coda: add error handling for fget
	coda: fix build using bare-metal toolchain
	uapi linux/coda_psdev.h: move upc_req definition from uapi to kernel side headers
	drivers/rapidio/devices/rio_mport_cdev.c: NUL terminate some strings
	ipc/mqueue.c: only perform resource calculation if user valid
	x86/kvm: Don't call kvm_spurious_fault() from .fixup
	x86, boot: Remove multiple copy of static function sanitize_boot_params()
	kbuild: initialize CLANG_FLAGS correctly in the top Makefile
	Btrfs: fix incremental send failure after deduplication
	mmc: dw_mmc: Fix occasional hang after tuning on eMMC
	gpiolib: fix incorrect IRQ requesting of an active-low lineevent
	selinux: fix memory leak in policydb_init()
	s390/dasd: fix endless loop after read unit address configuration
	drivers/perf: arm_pmu: Fix failure path in PM notifier
	xen/swiotlb: fix condition for calling xen_destroy_contiguous_region()
	IB/mlx5: Fix RSS Toeplitz setup to be aligned with the HW specification
	coredump: fix race condition between mmget_not_zero()/get_task_mm() and core dumping
	infiniband: fix race condition between infiniband mlx4, mlx5 driver and core dumping
	coredump: fix race condition between collapse_huge_page() and core dumping
	eeprom: at24: make spd world-readable again
	Backport minimal compiler_attributes.h to support GCC 9
	include/linux/module.h: copy __init/__exit attrs to init/cleanup_module
	objtool: Support GCC 9 cold subfunction naming scheme
	x86, mm, gup: prevent get_page() race with munmap in paravirt guest
	Linux 4.9.188

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman 2019-08-06 18:45:16 +02:00
commit 4ebd29edaf
50 changed files with 287 additions and 174 deletions

View file

@ -1,6 +1,6 @@
VERSION = 4 VERSION = 4
PATCHLEVEL = 9 PATCHLEVEL = 9
SUBLEVEL = 187 SUBLEVEL = 188
EXTRAVERSION = EXTRAVERSION =
NAME = Roaring Lionus NAME = Roaring Lionus
@ -401,6 +401,7 @@ KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE KBUILD_CFLAGS_MODULE := -DMODULE
KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
GCC_PLUGINS_CFLAGS := GCC_PLUGINS_CFLAGS :=
CLANG_FLAGS :=
# Read KERNELRELEASE from include/config/kernel.release (if it exists) # Read KERNELRELEASE from include/config/kernel.release (if it exists)
KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
@ -508,7 +509,7 @@ endif
ifeq ($(cc-name),clang) ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),) ifneq ($(CROSS_COMPILE),)
CLANG_TRIPLE ?= $(CROSS_COMPILE) CLANG_TRIPLE ?= $(CROSS_COMPILE)
CLANG_FLAGS := --target=$(notdir $(CLANG_TRIPLE:%-=%)) CLANG_FLAGS += --target=$(notdir $(CLANG_TRIPLE:%-=%))
ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y) ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y)
$(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?") $(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
endif endif

View file

@ -161,10 +161,6 @@
}; };
}; };
&emmc {
/delete-property/mmc-hs200-1_8v;
};
&i2c2 { &i2c2 {
status = "disabled"; status = "disabled";
}; };

View file

@ -125,10 +125,6 @@
power-supply = <&backlight_regulator>; power-supply = <&backlight_regulator>;
}; };
&emmc {
/delete-property/mmc-hs200-1_8v;
};
&gpio_keys { &gpio_keys {
pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>; pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;

View file

@ -210,6 +210,7 @@
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>, <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
clock-frequency = <24000000>; clock-frequency = <24000000>;
arm,no-tick-in-suspend;
}; };
timer: timer@ff810000 { timer: timer@ff810000 {

View file

@ -131,7 +131,7 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
} while (1); } while (1);
idma->state = ~DMA_ST_AB; idma->state = ~DMA_ST_AB;
disable_irq(irq); disable_irq_nosync(irq);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
@ -174,6 +174,9 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma)
DMA_FROM_DEVICE : DMA_TO_DEVICE); DMA_FROM_DEVICE : DMA_TO_DEVICE);
} }
idma->dma_addr = idma->dma.sg->dma_address;
idma->dma_len = idma->dma.sg->length;
iomd_writeb(DMA_CR_C, dma_base + CR); iomd_writeb(DMA_CR_C, dma_base + CR);
idma->state = DMA_ST_AB; idma->state = DMA_ST_AB;
} }

View file

@ -160,8 +160,9 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
if (edge) if (edge)
irq_set_handler(d->hwirq, handle_edge_irq); irq_set_handler(d->hwirq, handle_edge_irq);
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
(val << (i * 4)), LTQ_EIU_EXIN_C); (~(7 << (i * 4)))) | (val << (i * 4)),
LTQ_EIU_EXIN_C);
} }
} }

View file

@ -15,6 +15,7 @@
#include "error.h" #include "error.h"
#include "../string.h" #include "../string.h"
#include "../voffset.h" #include "../voffset.h"
#include <asm/bootparam_utils.h>
/* /*
* WARNING!! * WARNING!!

View file

@ -19,7 +19,6 @@
#include <asm/page.h> #include <asm/page.h>
#include <asm/boot.h> #include <asm/boot.h>
#include <asm/bootparam.h> #include <asm/bootparam.h>
#include <asm/bootparam_utils.h>
#define BOOT_BOOT_H #define BOOT_BOOT_H
#include "../ctype.h" #include "../ctype.h"

View file

@ -50,7 +50,7 @@ static inline void generic_apic_probe(void)
#ifdef CONFIG_X86_LOCAL_APIC #ifdef CONFIG_X86_LOCAL_APIC
extern unsigned int apic_verbosity; extern int apic_verbosity;
extern int local_apic_timer_c2_ok; extern int local_apic_timer_c2_ok;
extern int disable_apic; extern int disable_apic;

View file

@ -1309,25 +1309,29 @@ enum {
#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0) #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm) #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
asmlinkage void __noreturn kvm_spurious_fault(void);
/* /*
* Hardware virtualization extension instructions may fault if a * Hardware virtualization extension instructions may fault if a
* reboot turns off virtualization while processes are running. * reboot turns off virtualization while processes are running.
* Trap the fault and ignore the instruction if that happens. * Usually after catching the fault we just panic; during reboot
* instead the instruction is ignored.
*/ */
asmlinkage void kvm_spurious_fault(void); #define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
"666: \n\t" \
#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \ insn "\n\t" \
"666: " insn "\n\t" \ "jmp 668f \n\t" \
"668: \n\t" \ "667: \n\t" \
".pushsection .fixup, \"ax\" \n" \ "call kvm_spurious_fault \n\t" \
"667: \n\t" \ "668: \n\t" \
cleanup_insn "\n\t" \ ".pushsection .fixup, \"ax\" \n\t" \
"cmpb $0, kvm_rebooting \n\t" \ "700: \n\t" \
"jne 668b \n\t" \ cleanup_insn "\n\t" \
__ASM_SIZE(push) " $666b \n\t" \ "cmpb $0, kvm_rebooting\n\t" \
"jmp kvm_spurious_fault \n\t" \ "je 667b \n\t" \
".popsection \n\t" \ "jmp 668b \n\t" \
_ASM_EXTABLE(666b, 667b) ".popsection \n\t" \
_ASM_EXTABLE(666b, 700b)
#define __kvm_handle_fault_on_reboot(insn) \ #define __kvm_handle_fault_on_reboot(insn) \
____kvm_handle_fault_on_reboot(insn, "") ____kvm_handle_fault_on_reboot(insn, "")

View file

@ -183,7 +183,7 @@ int first_system_vector = FIRST_SYSTEM_VECTOR;
/* /*
* Debug level, exported for io_apic.c * Debug level, exported for io_apic.c
*/ */
unsigned int apic_verbosity; int apic_verbosity;
int pic_mode; int pic_mode;

View file

@ -176,7 +176,7 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
#define setexponentpos(x,y) { (*(short *)&((x)->exp)) = \ #define setexponentpos(x,y) { (*(short *)&((x)->exp)) = \
((y) + EXTENDED_Ebias) & 0x7fff; } ((y) + EXTENDED_Ebias) & 0x7fff; }
#define exponent16(x) (*(short *)&((x)->exp)) #define exponent16(x) (*(short *)&((x)->exp))
#define setexponent16(x,y) { (*(short *)&((x)->exp)) = (y); } #define setexponent16(x,y) { (*(short *)&((x)->exp)) = (u16)(y); }
#define addexponent(x,y) { (*(short *)&((x)->exp)) += (y); } #define addexponent(x,y) { (*(short *)&((x)->exp)) += (y); }
#define stdexp(x) { (*(short *)&((x)->exp)) += EXTENDED_Ebias; } #define stdexp(x) { (*(short *)&((x)->exp)) += EXTENDED_Ebias; }

View file

@ -17,7 +17,7 @@
#include "control_w.h" #include "control_w.h"
#define MAKE_REG(s, e, l, h) { l, h, \ #define MAKE_REG(s, e, l, h) { l, h, \
((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) } (u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000); FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
#if 0 #if 0

View file

@ -97,6 +97,20 @@ static inline int pte_allows_gup(unsigned long pteval, int write)
return 1; return 1;
} }
/*
 * Return the compound head page with ref appropriately incremented,
* or NULL if that failed.
*/
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
struct page *head = compound_head(page);
if (WARN_ON_ONCE(page_ref_count(head) < 0))
return NULL;
if (unlikely(!page_cache_add_speculative(head, refs)))
return NULL;
return head;
}
/* /*
* The performance critical leaf functions are made noinline otherwise gcc * The performance critical leaf functions are made noinline otherwise gcc
* inlines everything into a single function which results in too much * inlines everything into a single function which results in too much
@ -112,7 +126,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
ptep = pte_offset_map(&pmd, addr); ptep = pte_offset_map(&pmd, addr);
do { do {
pte_t pte = gup_get_pte(ptep); pte_t pte = gup_get_pte(ptep);
struct page *page; struct page *head, *page;
/* Similar to the PMD case, NUMA hinting must take slow path */ /* Similar to the PMD case, NUMA hinting must take slow path */
if (pte_protnone(pte)) { if (pte_protnone(pte)) {
@ -138,7 +152,21 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
} }
VM_BUG_ON(!pfn_valid(pte_pfn(pte))); VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = pte_page(pte); page = pte_page(pte);
get_page(page);
head = try_get_compound_head(page, 1);
if (!head) {
put_dev_pagemap(pgmap);
pte_unmap(ptep);
return 0;
}
if (unlikely(pte_val(pte) != pte_val(*ptep))) {
put_page(head);
put_dev_pagemap(pgmap);
pte_unmap(ptep);
return 0;
}
put_dev_pagemap(pgmap); put_dev_pagemap(pgmap);
SetPageReferenced(page); SetPageReferenced(page);
pages[*nr] = page; pages[*nr] = page;

View file

@ -1111,7 +1111,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
/* Someone calling slave DMA on a generic channel? */ /* Someone calling slave DMA on a generic channel? */
if (rchan->mid_rid < 0 || !sg_len) { if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
dev_warn(chan->device->dev, dev_warn(chan->device->dev,
"%s: bad parameter: len=%d, id=%d\n", "%s: bad parameter: len=%d, id=%d\n",
__func__, sg_len, rchan->mid_rid); __func__, sg_len, rchan->mid_rid);

View file

@ -817,9 +817,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
} }
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE) if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
irqflags |= IRQF_TRIGGER_RISING; irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE) if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
irqflags |= IRQF_TRIGGER_FALLING; irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT; irqflags |= IRQF_ONESHOT;
irqflags |= IRQF_SHARED; irqflags |= IRQF_SHARED;

View file

@ -1172,6 +1172,8 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
* mlx4_ib_vma_close(). * mlx4_ib_vma_close().
*/ */
down_write(&owning_mm->mmap_sem); down_write(&owning_mm->mmap_sem);
if (!mmget_still_valid(owning_mm))
goto skip_mm;
for (i = 0; i < HW_BAR_COUNT; i++) { for (i = 0; i < HW_BAR_COUNT; i++) {
vma = context->hw_bar_info[i].vma; vma = context->hw_bar_info[i].vma;
if (!vma) if (!vma)
@ -1190,7 +1192,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
/* context going to be destroyed, should not access ops any more */ /* context going to be destroyed, should not access ops any more */
context->hw_bar_info[i].vma->vm_ops = NULL; context->hw_bar_info[i].vma->vm_ops = NULL;
} }
skip_mm:
up_write(&owning_mm->mmap_sem); up_write(&owning_mm->mmap_sem);
mmput(owning_mm); mmput(owning_mm);
put_task_struct(owning_process); put_task_struct(owning_process);

View file

@ -1307,6 +1307,8 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
* mlx5_ib_vma_close. * mlx5_ib_vma_close.
*/ */
down_write(&owning_mm->mmap_sem); down_write(&owning_mm->mmap_sem);
if (!mmget_still_valid(owning_mm))
goto skip_mm;
list_for_each_entry_safe(vma_private, n, &context->vma_private_list, list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
list) { list) {
vma = vma_private->vma; vma = vma_private->vma;
@ -1321,6 +1323,7 @@ static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
list_del(&vma_private->list); list_del(&vma_private->list);
kfree(vma_private); kfree(vma_private);
} }
skip_mm:
up_write(&owning_mm->mmap_sem); up_write(&owning_mm->mmap_sem);
mmput(owning_mm); mmput(owning_mm);
put_task_struct(owning_process); put_task_struct(owning_process);

View file

@ -1421,7 +1421,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
} }
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, ucmd.rx_hash_key, len); memcpy(rss_key, ucmd.rx_hash_key, len);
break; break;
} }

View file

@ -777,7 +777,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
at24->nvmem_config.name = dev_name(&client->dev); at24->nvmem_config.name = dev_name(&client->dev);
at24->nvmem_config.dev = &client->dev; at24->nvmem_config.dev = &client->dev;
at24->nvmem_config.read_only = !writable; at24->nvmem_config.read_only = !writable;
at24->nvmem_config.root_only = true; at24->nvmem_config.root_only = !(chip.flags & AT24_FLAG_IRUGO);
at24->nvmem_config.owner = THIS_MODULE; at24->nvmem_config.owner = THIS_MODULE;
at24->nvmem_config.compat = true; at24->nvmem_config.compat = true;
at24->nvmem_config.base_dev = &client->dev; at24->nvmem_config.base_dev = &client->dev;

View file

@ -1864,8 +1864,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
* delayed. Allowing the transfer to take place * delayed. Allowing the transfer to take place
* avoids races and keeps things simple. * avoids races and keeps things simple.
*/ */
if ((err != -ETIMEDOUT) && if (err != -ETIMEDOUT) {
(cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
state = STATE_SENDING_DATA; state = STATE_SENDING_DATA;
continue; continue;
} }

View file

@ -4701,8 +4701,12 @@ int be_update_queues(struct be_adapter *adapter)
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
int status; int status;
if (netif_running(netdev)) if (netif_running(netdev)) {
/* device cannot transmit now, avoid dev_watchdog timeouts */
netif_carrier_off(netdev);
be_close(netdev); be_close(netdev);
}
be_cancel_worker(adapter); be_cancel_worker(adapter);

View file

@ -804,8 +804,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
cpu_pm_pmu_setup(armpmu, cmd); cpu_pm_pmu_setup(armpmu, cmd);
break; break;
case CPU_PM_EXIT: case CPU_PM_EXIT:
cpu_pm_pmu_setup(armpmu, cmd);
case CPU_PM_ENTER_FAILED: case CPU_PM_ENTER_FAILED:
cpu_pm_pmu_setup(armpmu, cmd);
armpmu->start(armpmu); armpmu->start(armpmu);
break; break;
default: default:

View file

@ -1743,6 +1743,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
if (copy_from_user(&dev_info, arg, sizeof(dev_info))) if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
return -EFAULT; return -EFAULT;
dev_info.name[sizeof(dev_info.name) - 1] = '\0';
rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name, rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
dev_info.comptag, dev_info.destid, dev_info.hopcount); dev_info.comptag, dev_info.destid, dev_info.hopcount);
@ -1874,6 +1875,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
if (copy_from_user(&dev_info, arg, sizeof(dev_info))) if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
return -EFAULT; return -EFAULT;
dev_info.name[sizeof(dev_info.name) - 1] = '\0';
mport = priv->md->mport; mport = priv->md->mport;

View file

@ -382,6 +382,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
char msg_format; char msg_format;
char msg_no; char msg_no;
/*
* intrc values ENODEV, ENOLINK and EPERM
 * will be obtained from sleep_on to indicate that no
* IO operation can be started
*/
if (cqr->intrc == -ENODEV)
return 1;
if (cqr->intrc == -ENOLINK)
return 1;
if (cqr->intrc == -EPERM)
return 1;
sense = dasd_get_sense(&cqr->irb); sense = dasd_get_sense(&cqr->irb);
if (!sense) if (!sense)
return 0; return 0;
@ -446,12 +460,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
lcu->flags &= ~NEED_UAC_UPDATE; lcu->flags &= ~NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags); spin_unlock_irqrestore(&lcu->lock, flags);
do { rc = dasd_sleep_on(cqr);
rc = dasd_sleep_on(cqr); if (rc && !suborder_not_supported(cqr)) {
if (rc && suborder_not_supported(cqr))
return -EOPNOTSUPP;
} while (rc && (cqr->retries > 0));
if (rc) {
spin_lock_irqsave(&lcu->lock, flags); spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE; lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags); spin_unlock_irqrestore(&lcu->lock, flags);

View file

@ -10,6 +10,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kthread.h> #include <linux/kthread.h>
#include <linux/bug.h>
#include "zfcp_ext.h" #include "zfcp_ext.h"
#include "zfcp_reqlist.h" #include "zfcp_reqlist.h"
@ -244,6 +245,12 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
struct zfcp_erp_action *erp_action; struct zfcp_erp_action *erp_action;
struct zfcp_scsi_dev *zfcp_sdev; struct zfcp_scsi_dev *zfcp_sdev;
if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
need != ZFCP_ERP_ACTION_REOPEN_PORT &&
need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
return NULL;
switch (need) { switch (need) {
case ZFCP_ERP_ACTION_REOPEN_LUN: case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(sdev); zfcp_sdev = sdev_to_zfcp(sdev);

View file

@ -365,8 +365,8 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
/* Convert the size to actually allocated. */ /* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT); size = 1UL << (order + XEN_PAGE_SHIFT);
if (((dev_addr + size - 1 <= dma_mask)) || if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
range_straddles_page_boundary(phys, size)) range_straddles_page_boundary(phys, size)))
xen_destroy_contiguous_region(phys, order); xen_destroy_contiguous_region(phys, order);
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs); xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);

View file

@ -368,6 +368,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
struct buffer_head *bh; struct buffer_head *bh;
struct object_info root_obj; struct object_info root_obj;
unsigned char *b_data; unsigned char *b_data;
unsigned int blocksize;
struct adfs_sb_info *asb; struct adfs_sb_info *asb;
struct inode *root; struct inode *root;
int ret = -EINVAL; int ret = -EINVAL;
@ -419,8 +420,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
goto error_free_bh; goto error_free_bh;
} }
blocksize = 1 << dr->log2secsize;
brelse(bh); brelse(bh);
if (sb_set_blocksize(sb, 1 << dr->log2secsize)) {
if (sb_set_blocksize(sb, blocksize)) {
bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize); bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
if (!bh) { if (!bh) {
adfs_error(sb, "couldn't read superblock on " adfs_error(sb, "couldn't read superblock on "

View file

@ -5835,68 +5835,21 @@ static int changed_extent(struct send_ctx *sctx,
{ {
int ret = 0; int ret = 0;
if (sctx->cur_ino != sctx->cmp_key->objectid) { /*
* We have found an extent item that changed without the inode item
if (result == BTRFS_COMPARE_TREE_CHANGED) { * having changed. This can happen either after relocation (where the
struct extent_buffer *leaf_l; * disk_bytenr of an extent item is replaced at
struct extent_buffer *leaf_r; * relocation.c:replace_file_extents()) or after deduplication into a
struct btrfs_file_extent_item *ei_l; * file in both the parent and send snapshots (where an extent item can
struct btrfs_file_extent_item *ei_r; * get modified or replaced with a new one). Note that deduplication
* updates the inode item, but it only changes the iversion (sequence
leaf_l = sctx->left_path->nodes[0]; * field in the inode item) of the inode, so if a file is deduplicated
leaf_r = sctx->right_path->nodes[0]; * the same amount of times in both the parent and send snapshots, its
ei_l = btrfs_item_ptr(leaf_l, * iversion becames the same in both snapshots, whence the inode item is
sctx->left_path->slots[0], * the same on both snapshots.
struct btrfs_file_extent_item); */
ei_r = btrfs_item_ptr(leaf_r, if (sctx->cur_ino != sctx->cmp_key->objectid)
sctx->right_path->slots[0], return 0;
struct btrfs_file_extent_item);
/*
* We may have found an extent item that has changed
* only its disk_bytenr field and the corresponding
* inode item was not updated. This case happens due to
* very specific timings during relocation when a leaf
* that contains file extent items is COWed while
* relocation is ongoing and its in the stage where it
* updates data pointers. So when this happens we can
* safely ignore it since we know it's the same extent,
* but just at different logical and physical locations
* (when an extent is fully replaced with a new one, we
* know the generation number must have changed too,
* since snapshot creation implies committing the current
* transaction, and the inode item must have been updated
* as well).
* This replacement of the disk_bytenr happens at
* relocation.c:replace_file_extents() through
* relocation.c:btrfs_reloc_cow_block().
*/
if (btrfs_file_extent_generation(leaf_l, ei_l) ==
btrfs_file_extent_generation(leaf_r, ei_r) &&
btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
btrfs_file_extent_compression(leaf_l, ei_l) ==
btrfs_file_extent_compression(leaf_r, ei_r) &&
btrfs_file_extent_encryption(leaf_l, ei_l) ==
btrfs_file_extent_encryption(leaf_r, ei_r) &&
btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
btrfs_file_extent_type(leaf_l, ei_l) ==
btrfs_file_extent_type(leaf_r, ei_r) &&
btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
btrfs_file_extent_offset(leaf_l, ei_l) ==
btrfs_file_extent_offset(leaf_r, ei_r) &&
btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
btrfs_file_extent_num_bytes(leaf_r, ei_r))
return 0;
}
inconsistent_snapshot_error(sctx, result, "extent");
return -EIO;
}
if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
if (result != BTRFS_COMPARE_TREE_DELETED) if (result != BTRFS_COMPARE_TREE_DELETED)

View file

@ -5072,8 +5072,7 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10 | BTRFS_BLOCK_GROUP_RAID10 |
BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID5)) {
BTRFS_BLOCK_GROUP_DUP)) {
max_errors = 1; max_errors = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) { } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
max_errors = 2; max_errors = 2;

View file

@ -476,7 +476,12 @@ static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
long long release_count, long long release_count,
long long ordered_count) long long ordered_count)
{ {
smp_mb__before_atomic(); /*
* Makes sure operations that setup readdir cache (update page
* cache and i_size) are strongly ordered w.r.t. the following
* atomic64_set() operations.
*/
smp_mb();
atomic64_set(&ci->i_complete_seq[0], release_count); atomic64_set(&ci->i_complete_seq[0], release_count);
atomic64_set(&ci->i_complete_seq[1], ordered_count); atomic64_set(&ci->i_complete_seq[1], ordered_count);
} }

View file

@ -74,7 +74,7 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
const char *ns_field = " pool_namespace="; const char *ns_field = " pool_namespace=";
char buf[128]; char buf[128];
size_t len, total_len = 0; size_t len, total_len = 0;
int ret; ssize_t ret;
pool_ns = ceph_try_get_string(ci->i_layout.pool_ns); pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
@ -98,11 +98,8 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
if (pool_ns) if (pool_ns)
total_len += strlen(ns_field) + pool_ns->len; total_len += strlen(ns_field) + pool_ns->len;
if (!size) { ret = total_len;
ret = total_len; if (size >= total_len) {
} else if (total_len > size) {
ret = -ERANGE;
} else {
memcpy(val, buf, len); memcpy(val, buf, len);
ret = len; ret = len;
if (pool_name) { if (pool_name) {
@ -757,8 +754,11 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
vxattr = ceph_match_vxattr(inode, name); vxattr = ceph_match_vxattr(inode, name);
if (vxattr) { if (vxattr) {
err = -ENODATA; err = -ENODATA;
if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
err = vxattr->getxattr_cb(ci, value, size); err = vxattr->getxattr_cb(ci, value, size);
if (size && size < err)
err = -ERANGE;
}
return err; return err;
} }

View file

@ -187,8 +187,11 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
if (req->uc_opcode == CODA_OPEN_BY_FD) { if (req->uc_opcode == CODA_OPEN_BY_FD) {
struct coda_open_by_fd_out *outp = struct coda_open_by_fd_out *outp =
(struct coda_open_by_fd_out *)req->uc_data; (struct coda_open_by_fd_out *)req->uc_data;
if (!outp->oh.result) if (!outp->oh.result) {
outp->fh = fget(outp->fd); outp->fh = fget(outp->fd);
if (!outp->fh)
return -EBADF;
}
} }
wake_up(&req->uc_sleep); wake_up(&req->uc_sleep);

View file

@ -1201,6 +1201,24 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
count = -EINTR; count = -EINTR;
goto out_mm; goto out_mm;
} }
/*
* Avoid to modify vma->vm_flags
* without locked ops while the
* coredump reads the vm_flags.
*/
if (!mmget_still_valid(mm)) {
/*
* Silently return "count"
* like if get_task_mm()
* failed. FIXME: should this
* function have returned
* -ESRCH if get_task_mm()
* failed like if
* get_proc_task() fails?
*/
up_write(&mm->mmap_sem);
goto out_mm;
}
for (vma = mm->mmap; vma; vma = vma->vm_next) { for (vma = mm->mmap; vma; vma = vma->vm_next) {
vma->vm_flags &= ~VM_SOFTDIRTY; vma->vm_flags &= ~VM_SOFTDIRTY;
vma_set_page_prot(vma); vma_set_page_prot(vma);

View file

@ -479,6 +479,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
* taking the mmap_sem for writing. * taking the mmap_sem for writing.
*/ */
down_write(&mm->mmap_sem); down_write(&mm->mmap_sem);
if (!mmget_still_valid(mm))
goto skip_mm;
prev = NULL; prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) { for (vma = mm->mmap; vma; vma = vma->vm_next) {
cond_resched(); cond_resched();
@ -502,6 +504,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
vma->vm_flags = new_flags; vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
} }
skip_mm:
up_write(&mm->mmap_sem); up_write(&mm->mmap_sem);
mmput(mm); mmput(mm);
wakeup: wakeup:
@ -803,6 +806,9 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
goto out; goto out;
down_write(&mm->mmap_sem); down_write(&mm->mmap_sem);
if (!mmget_still_valid(mm))
goto out_unlock;
vma = find_vma_prev(mm, start, &prev); vma = find_vma_prev(mm, start, &prev);
if (!vma) if (!vma)
goto out_unlock; goto out_unlock;
@ -962,6 +968,9 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
goto out; goto out;
down_write(&mm->mmap_sem); down_write(&mm->mmap_sem);
if (!mmget_still_valid(mm))
goto out_unlock;
vma = find_vma_prev(mm, start, &prev); vma = find_vma_prev(mm, start, &prev);
if (!vma) if (!vma)
goto out_unlock; goto out_unlock;

View file

@ -309,7 +309,10 @@ void acpi_set_irq_model(enum acpi_irq_model_id model,
#ifdef CONFIG_X86_IO_APIC #ifdef CONFIG_X86_IO_APIC
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity); extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#else #else
#define acpi_get_override_irq(gsi, trigger, polarity) (-1) static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
{
return -1;
}
#endif #endif
/* /*
* This function undoes the effect of one call to acpi_register_gsi(). * This function undoes the effect of one call to acpi_register_gsi().

View file

@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
#ifndef _CODA_HEADER_ #ifndef _CODA_HEADER_
#define _CODA_HEADER_ #define _CODA_HEADER_
#if defined(__linux__)
typedef unsigned long long u_quad_t; typedef unsigned long long u_quad_t;
#endif
#include <uapi/linux/coda.h> #include <uapi/linux/coda.h>
#endif #endif

View file

@ -19,6 +19,17 @@ struct venus_comm {
struct mutex vc_mutex; struct mutex vc_mutex;
}; };
/* messages between coda filesystem in kernel and Venus */
struct upc_req {
struct list_head uc_chain;
caddr_t uc_data;
u_short uc_flags;
u_short uc_inSize; /* Size is at most 5000 bytes */
u_short uc_outSize;
u_short uc_opcode; /* copied from data to save lookup */
int uc_unique;
wait_queue_head_t uc_sleep; /* process' wait queue */
};
static inline struct venus_comm *coda_vcp(struct super_block *sb) static inline struct venus_comm *coda_vcp(struct super_block *sb)
{ {

View file

@ -54,6 +54,22 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#ifdef __KERNEL__ #ifdef __KERNEL__
/*
* Minimal backport of compiler_attributes.h to add support for __copy
* to v4.9.y so that we can use it in init/exit_module to avoid
* -Werror=missing-attributes errors on GCC 9.
*/
#ifndef __has_attribute
# define __has_attribute(x) __GCC4_has_attribute_##x
# define __GCC4_has_attribute___copy__ 0
#endif
#if __has_attribute(__copy__)
# define __copy(symbol) __attribute__((__copy__(symbol)))
#else
# define __copy(symbol)
#endif
#ifdef __GNUC__ #ifdef __GNUC__
#include <linux/compiler-gcc.h> #include <linux/compiler-gcc.h>
#endif #endif

View file

@ -1193,6 +1193,30 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *); unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma, void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long start, unsigned long end); unsigned long start, unsigned long end);
/*
* This has to be called after a get_task_mm()/mmget_not_zero()
* followed by taking the mmap_sem for writing before modifying the
* vmas or anything the coredump pretends not to change from under it.
*
* It also has to be called when mmgrab() is used in the context of
* the process, but then the mm_count refcount is transferred outside
* the context of the process to run down_write() on that pinned mm.
*
* NOTE: find_extend_vma() called from GUP context is the only place
* that can modify the "mm" (notably the vm_start/end) under mmap_sem
* for reading and outside the context of the process, so it is also
* the only case that holds the mmap_sem for reading that must call
* this function. Generally if the mmap_sem is hold for reading
* there's no need of this check after get_task_mm()/mmget_not_zero().
*
* This function can be obsoleted and the check can be removed, after
* the coredump code will hold the mmap_sem for writing before
* invoking the ->core_dump methods.
*/
static inline bool mmget_still_valid(struct mm_struct *mm)
{
return likely(!mm->core_state);
}
/** /**
* mm_walk - callbacks for walk_page_range * mm_walk - callbacks for walk_page_range

View file

@ -130,13 +130,13 @@ extern void cleanup_module(void);
#define module_init(initfn) \ #define module_init(initfn) \
static inline initcall_t __maybe_unused __inittest(void) \ static inline initcall_t __maybe_unused __inittest(void) \
{ return initfn; } \ { return initfn; } \
int init_module(void) __attribute__((alias(#initfn))); int init_module(void) __copy(initfn) __attribute__((alias(#initfn)));
/* This is only required if you want to be unloadable. */ /* This is only required if you want to be unloadable. */
#define module_exit(exitfn) \ #define module_exit(exitfn) \
static inline exitcall_t __maybe_unused __exittest(void) \ static inline exitcall_t __maybe_unused __exittest(void) \
{ return exitfn; } \ { return exitfn; } \
void cleanup_module(void) __attribute__((alias(#exitfn))); void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn)));
#endif #endif

View file

@ -6,19 +6,6 @@
#define CODA_PSDEV_MAJOR 67 #define CODA_PSDEV_MAJOR 67
#define MAX_CODADEVS 5 /* how many do we allow */ #define MAX_CODADEVS 5 /* how many do we allow */
/* messages between coda filesystem in kernel and Venus */
struct upc_req {
struct list_head uc_chain;
caddr_t uc_data;
u_short uc_flags;
u_short uc_inSize; /* Size is at most 5000 bytes */
u_short uc_outSize;
u_short uc_opcode; /* copied from data to save lookup */
int uc_unique;
wait_queue_head_t uc_sleep; /* process' wait queue */
};
#define CODA_REQ_ASYNC 0x1 #define CODA_REQ_ASYNC 0x1
#define CODA_REQ_READ 0x2 #define CODA_REQ_READ 0x2
#define CODA_REQ_WRITE 0x4 #define CODA_REQ_WRITE 0x4

View file

@ -369,7 +369,6 @@ static void mqueue_evict_inode(struct inode *inode)
{ {
struct mqueue_inode_info *info; struct mqueue_inode_info *info;
struct user_struct *user; struct user_struct *user;
unsigned long mq_bytes, mq_treesize;
struct ipc_namespace *ipc_ns; struct ipc_namespace *ipc_ns;
struct msg_msg *msg, *nmsg; struct msg_msg *msg, *nmsg;
LIST_HEAD(tmp_msg); LIST_HEAD(tmp_msg);
@ -392,16 +391,18 @@ static void mqueue_evict_inode(struct inode *inode)
free_msg(msg); free_msg(msg);
} }
/* Total amount of bytes accounted for the mqueue */
mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
sizeof(struct posix_msg_tree_node);
mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
info->attr.mq_msgsize);
user = info->user; user = info->user;
if (user) { if (user) {
unsigned long mq_bytes, mq_treesize;
/* Total amount of bytes accounted for the mqueue */
mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
sizeof(struct posix_msg_tree_node);
mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
info->attr.mq_msgsize);
spin_lock(&mq_lock); spin_lock(&mq_lock);
user->mq_bytes -= mq_bytes; user->mq_bytes -= mq_bytes;
/* /*

View file

@ -3362,8 +3362,7 @@ static bool finished_loading(const char *name)
sched_annotate_sleep(); sched_annotate_sleep();
mutex_lock(&module_mutex); mutex_lock(&module_mutex);
mod = find_module_all(name, strlen(name), true); mod = find_module_all(name, strlen(name), true);
ret = !mod || mod->state == MODULE_STATE_LIVE ret = !mod || mod->state == MODULE_STATE_LIVE;
|| mod->state == MODULE_STATE_GOING;
mutex_unlock(&module_mutex); mutex_unlock(&module_mutex);
return ret; return ret;
@ -3526,8 +3525,7 @@ again:
mutex_lock(&module_mutex); mutex_lock(&module_mutex);
old = find_module_all(mod->name, strlen(mod->name), true); old = find_module_all(mod->name, strlen(mod->name), true);
if (old != NULL) { if (old != NULL) {
if (old->state == MODULE_STATE_COMING if (old->state != MODULE_STATE_LIVE) {
|| old->state == MODULE_STATE_UNFORMED) {
/* Wait in case it fails to load. */ /* Wait in case it fails to load. */
mutex_unlock(&module_mutex); mutex_unlock(&module_mutex);
err = wait_event_interruptible(module_wq, err = wait_event_interruptible(module_wq,

View file

@ -1632,6 +1632,11 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
return keep_regs; return keep_regs;
} }
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops, static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
int filter_hash, int filter_hash,
bool inc) bool inc)
@ -1760,15 +1765,17 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
} }
/* /*
* If the rec had TRAMP enabled, then it needs to * The TRAMP needs to be set only if rec count
* be cleared. As TRAMP can only be enabled iff * is decremented to one, and the ops that is
* there is only a single ops attached to it. * left has a trampoline. As TRAMP can only be
* In otherwords, always disable it on decrementing. * enabled if there is only a single ops attached
* In the future, we may set it if rec count is * to it.
* decremented to one, and the ops that is left
* has a trampoline.
*/ */
rec->flags &= ~FTRACE_FL_TRAMP; if (ftrace_rec_count(rec) == 1 &&
ftrace_find_tramp_ops_any(rec))
rec->flags |= FTRACE_FL_TRAMP;
else
rec->flags &= ~FTRACE_FL_TRAMP;
/* /*
* flags will be cleared in ftrace_check_record() * flags will be cleared in ftrace_check_record()
@ -1961,11 +1968,6 @@ static void print_ip_ins(const char *fmt, const unsigned char *p)
printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
} }
static struct ftrace_ops *
ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
static struct ftrace_ops *
ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
enum ftrace_bug_type ftrace_bug_type; enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected; const void *ftrace_expected;

View file

@ -268,6 +268,12 @@ int __init cma_declare_contiguous(phys_addr_t base,
*/ */
alignment = max(alignment, (phys_addr_t)PAGE_SIZE << alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
max_t(unsigned long, MAX_ORDER - 1, pageblock_order)); max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
if (fixed && base & (alignment - 1)) {
ret = -EINVAL;
pr_err("Region at %pa must be aligned to %pa bytes\n",
&base, &alignment);
goto err;
}
base = ALIGN(base, alignment); base = ALIGN(base, alignment);
size = ALIGN(size, alignment); size = ALIGN(size, alignment);
limit &= ~(alignment - 1); limit &= ~(alignment - 1);
@ -298,6 +304,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
if (limit == 0 || limit > memblock_end) if (limit == 0 || limit > memblock_end)
limit = memblock_end; limit = memblock_end;
if (base + size > limit) {
ret = -EINVAL;
pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
&size, &base, &limit);
goto err;
}
/* Reserve memory */ /* Reserve memory */
if (fixed) { if (fixed) {
if (memblock_is_region_reserved(base, size) || if (memblock_is_region_reserved(base, size) ||

View file

@ -1004,6 +1004,9 @@ static void collapse_huge_page(struct mm_struct *mm,
* handled by the anon_vma lock + PG_lock. * handled by the anon_vma lock + PG_lock.
*/ */
down_write(&mm->mmap_sem); down_write(&mm->mmap_sem);
result = SCAN_ANY_PROCESS;
if (!mmget_still_valid(mm))
goto out;
result = hugepage_vma_revalidate(mm, address, &vma); result = hugepage_vma_revalidate(mm, address, &vma);
if (result) if (result)
goto out; goto out;

View file

@ -2457,7 +2457,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
vma = find_vma_prev(mm, addr, &prev); vma = find_vma_prev(mm, addr, &prev);
if (vma && (vma->vm_start <= addr)) if (vma && (vma->vm_start <= addr))
return vma; return vma;
if (!prev || expand_stack(prev, addr)) /* don't alter vm_end if the coredump is running */
if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
return NULL; return NULL;
if (prev->vm_flags & VM_LOCKED) if (prev->vm_flags & VM_LOCKED)
populate_vma_page_range(prev, addr, prev->vm_end, NULL); populate_vma_page_range(prev, addr, prev->vm_end, NULL);
@ -2483,6 +2484,9 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
return vma; return vma;
if (!(vma->vm_flags & VM_GROWSDOWN)) if (!(vma->vm_flags & VM_GROWSDOWN))
return NULL; return NULL;
/* don't alter vm_start if the coredump is running */
if (!mmget_still_valid(mm))
return NULL;
start = vma->vm_start; start = vma->vm_start;
if (expand_stack(vma, addr)) if (expand_stack(vma, addr))
return NULL; return NULL;

View file

@ -266,6 +266,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
return v; return v;
} }
static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
/* /*
* Initialize a policy database structure. * Initialize a policy database structure.
*/ */
@ -313,8 +315,10 @@ static int policydb_init(struct policydb *p)
out: out:
hashtab_destroy(p->filename_trans); hashtab_destroy(p->filename_trans);
hashtab_destroy(p->range_tr); hashtab_destroy(p->range_tr);
for (i = 0; i < SYM_NUM; i++) for (i = 0; i < SYM_NUM; i++) {
hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
hashtab_destroy(p->symtab[i].table); hashtab_destroy(p->symtab[i].table);
}
return rc; return rc;
} }

View file

@ -305,7 +305,7 @@ static int read_symbols(struct elf *elf)
if (sym->type != STT_FUNC) if (sym->type != STT_FUNC)
continue; continue;
sym->pfunc = sym->cfunc = sym; sym->pfunc = sym->cfunc = sym;
coldstr = strstr(sym->name, ".cold."); coldstr = strstr(sym->name, ".cold");
if (!coldstr) if (!coldstr)
continue; continue;