This is the 4.9.42 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAlmN0iUACgkQONu9yGCS
aT6gZRAAhYbpsz2XRFSQ5H/Sk8xwJuwtLB2at3Y1CLCb+lLhlRsV3l4pD+4KRoEl
fU01l5s5ZalZJekGfEfEOQOfoJHsCxzFSzKzP06/GA5u6DbwtHUE2SjNWe84j6Ct
Hx0jN90yj7S8vy2umROux+fVvZQ4Xay4TDCWhBeXgOFXevwC/G9D2LWE2NIYwbDH
Ighahrhs21FZc9wbah0L04bRBAR7+ALLq1sO8ebKwl8eFzAkcEwI/yS48cnjGlgW
9HW5MmY1BYTnRCrXaw5L0Vf5zH6obT7amrLNljNYN6vN62DRoOfwQh4QcblnIAoi
L+HdZilifZ970RwQ2As3vy63/Kk3b207ht4mriTCyGXM9MY6bRovYv1wDAUlv7aD
GlA8Q7xwsiJ4sG4i5LABjly+QeWymZ2b0kVWYpneJuBuj/gWVDhh1lfT+nOCAcJ6
ROUY6d64ghKPBomkqlMSC+7sH7QKa0/W9WDQCLxtnmjcAkeElpGNGu/m/Thhvi2I
NDq2sbMAeGJquXBXIN8W4NPy0puOn0wjqFI7LE61ujSiAxT8973uDtNlrmB/eCAf
zD9yJsKELS20PKToren4hYYuRM2XlKh9gVIOWB2pShfzvSO7807ZyVcoI8/bVgZe
I2BH6Dt3t+qqWR7B5/qvxxmNCv3HNMpNUzy/z+fXEf8/U3zqiTM=
=n7pX
-----END PGP SIGNATURE-----

Merge 4.9.42 into android-4.9

Changes in 4.9.42
    parisc: Handle vma's whose context is not current in flush_cache_range
    cgroup: create dfl_root files on subsys registration
    cgroup: fix error return value from cgroup_subtree_control()
    libata: array underflow in ata_find_dev()
    workqueue: restore WQ_UNBOUND/max_active==1 to be ordered
    iwlwifi: dvm: prevent an out of bounds access
    brcmfmac: fix memleak due to calling brcmf_sdiod_sgtable_alloc() twice
    NFSv4: Fix EXCHANGE_ID corrupt verifier issue
    mmc: sdhci-of-at91: force card detect value for non removable devices
    device property: Make dev_fwnode() public
    mmc: core: Fix access to HS400-ES devices
    mm, mprotect: flush TLB if potentially racing with a parallel reclaim leaving stale TLB entries
    cpuset: fix a deadlock due to incomplete patching of cpusets_enabled()
    ALSA: hda - Fix speaker output from VAIO VPCL14M1R
    drm/amdgpu: Fix undue fallthroughs in golden registers initialization
    ASoC: do not close shared backend dailink
    KVM: async_pf: make rcu irq exit if not triggered from idle task
    mm/page_alloc: Remove kernel address exposure in free_reserved_area()
    timers: Fix overflow in get_next_timer_interrupt
    powerpc/tm: Fix saving of TM SPRs in core dump
    powerpc/64: Fix __check_irq_replay missing decrementer interrupt
    iommu/amd: Enable ga_log_intr when enabling guest_mode
    gpiolib: skip unwanted events, don't convert them to opposite edge
    ext4: fix SEEK_HOLE/SEEK_DATA for blocksize < pagesize
    ext4: fix overflow caused by missing cast in ext4_resize_fs()
    ARM: dts: armada-38x: Fix irq type for pca955
    ARM: dts: tango4: Request RGMII RX and TX clock delays
    media: platform: davinci: return -EINVAL for VPFE_CMD_S_CCDC_RAW_PARAMS ioctl
    iscsi-target: Fix initial login PDU asynchronous socket close OOPs
    mmc: dw_mmc: Use device_property_read instead of of_property_read
    mmc: core: Use device_property_read instead of of_property_read
    media: lirc: LIRC_GET_REC_RESOLUTION should return microseconds
    f2fs: sanity check checkpoint segno and blkoff
    Btrfs: fix early ENOSPC due to delalloc
    saa7164: fix double fetch PCIe access condition
    tcp_bbr: cut pacing rate only if filled pipe
    tcp_bbr: introduce bbr_bw_to_pacing_rate() helper
    tcp_bbr: introduce bbr_init_pacing_rate_from_rtt() helper
    tcp_bbr: remove sk_pacing_rate=0 transient during init
    tcp_bbr: init pacing rate on first RTT sample
    ipv4: ipv6: initialize treq->txhash in cookie_v[46]_check()
    net: Zero terminate ifr_name in dev_ifname().
    ipv6: avoid overflow of offset in ip6_find_1stfragopt
    net: dsa: b53: Add missing ARL entries for BCM53125
    ipv4: initialize fib_trie prior to register_netdev_notifier call.
    rtnetlink: allocate more memory for dev_set_mac_address()
    mcs7780: Fix initialization when CONFIG_VMAP_STACK is enabled
    openvswitch: fix potential out of bound access in parse_ct
    packet: fix use-after-free in prb_retire_rx_blk_timer_expired()
    ipv6: Don't increase IPSTATS_MIB_FRAGFAILS twice in ip6_fragment()
    net: ethernet: nb8800: Handle all 4 RGMII modes identically
    dccp: fix a memleak that dccp_ipv6 doesn't put reqsk properly
    dccp: fix a memleak that dccp_ipv4 doesn't put reqsk properly
    dccp: fix a memleak for dccp_feat_init err process
    sctp: don't dereference ptr before leaving _sctp_walk_{params, errors}()
    sctp: fix the check for _sctp_walk_params and _sctp_walk_errors
    net/mlx5: Consider tx_enabled in all modes on remap
    net/mlx5: Fix command bad flow on command entry allocation failure
    net/mlx5e: Fix outer_header_zero() check size
    net/mlx5e: Fix wrong delay calculation for overflow check scheduling
    net/mlx5e: Schedule overflow check work to mlx5e workqueue
    net: phy: Correctly process PHY_HALTED in phy_stop_machine()
    xen-netback: correctly schedule rate-limited queues
    sparc64: Measure receiver forward progress to avoid send mondo timeout
    sparc64: Fix exception handling in UltraSPARC-III memcpy.
    wext: handle NULL extra data in iwe_stream_add_point better
    sh_eth: fix EESIPR values for SH77{34|63}
    sh_eth: R8A7740 supports packet shecksumming
    net: phy: dp83867: fix irq generation
    tg3: Fix race condition in tg3_get_stats64().
    x86/boot: Add missing declaration of string functions
    spi: spi-axi: Free resources on error path
    ASoC: rt5645: set sel_i2s_pre_div1 to 2
    netfilter: use fwmark_reflect in nf_send_reset
    phy state machine: failsafe leave invalid RUNNING state
    ipv4: make tcp_notsent_lowat sysctl knob behave as true unsigned int
    clk/samsung: exynos542x: mark some clocks as critical
    scsi: qla2xxx: Get mutex lock before checking optrom_state
    drm/virtio: fix framebuffer sparse warning
    ARM: dts: sun8i: Support DTB build for NanoPi M1
    ARM: dts: sunxi: Change node name for pwrseq pin on Olinuxino-lime2-emmc
    iw_cxgb4: do not send RX_DATA_ACK CPLs after close/abort
    nbd: blk_mq_init_queue returns an error code on failure, not NULL
    virtio_blk: fix panic in initialization error path
    ARM: 8632/1: ftrace: fix syscall name matching
    mm, slab: make sure that KMALLOC_MAX_SIZE will fit into MAX_ORDER
    lib/Kconfig.debug: fix frv build failure
    signal: protect SIGNAL_UNKILLABLE from unintentional clearing.
    mm: don't dereference struct page fields of invalid pages
    net/mlx5: E-Switch, Re-enable RoCE on mode change only after FDB destroy
    ipv4: Should use consistent conditional judgement for ip fragment in __ip_append_data and ip_finish_output
    net: account for current skb length when deciding about UFO
    net: phy: Fix PHY unbind crash
    workqueue: implicit ordered attribute should be overridable
    Linux 4.9.42

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 02f29ab1b9

103 changed files with 799 additions and 382 deletions

Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 41
+SUBLEVEL = 42
 EXTRAVERSION =
 NAME = Roaring Lionus
 
@@ -820,6 +820,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
 	sun8i-a83t-allwinner-h8homlet-v2.dtb \
 	sun8i-a83t-cubietruck-plus.dtb \
 	sun8i-h3-bananapi-m2-plus.dtb \
+	sun8i-h3-nanopi-m1.dtb \
 	sun8i-h3-nanopi-neo.dtb \
 	sun8i-h3-orangepi-2.dtb \
 	sun8i-h3-orangepi-lite.dtb \
@@ -75,7 +75,7 @@
 			pinctrl-names = "default";
 			pinctrl-0 = <&pca0_pins>;
 			interrupt-parent = <&gpio0>;
-			interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+			interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
 			gpio-controller;
 			#gpio-cells = <2>;
 			interrupt-controller;
@@ -87,7 +87,7 @@
 			compatible = "nxp,pca9555";
 			pinctrl-names = "default";
 			interrupt-parent = <&gpio0>;
-			interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+			interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
 			gpio-controller;
 			#gpio-cells = <2>;
 			interrupt-controller;
@@ -56,7 +56,7 @@
 };
 
 &pio {
-	mmc2_pins_nrst: mmc2@0 {
+	mmc2_pins_nrst: mmc2-rst-pin {
 		allwinner,pins = "PC16";
 		allwinner,function = "gpio_out";
 		allwinner,drive = <SUN4I_PINCTRL_10_MA>;
@@ -21,7 +21,7 @@
 };
 
 &eth0 {
-	phy-connection-type = "rgmii";
+	phy-connection-type = "rgmii-id";
 	phy-handle = <&eth0_phy>;
 	#address-cells = <1>;
 	#size-cells = <0>;
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
 
 #define ftrace_return_address(n) return_address(n)
 
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+					       const char *name)
+{
+	if (!strcmp(sym, "sys_mmap2"))
+		sym = "sys_mmap_pgoff";
+	else if (!strcmp(sym, "sys_statfs64_wrapper"))
+		sym = "sys_statfs64";
+	else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+		sym = "sys_fstatfs64";
+	else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+		sym = "sys_fadvise64_64";
+
+	/* Ignore case since sym may start with "SyS" instead of "sys" */
+	return !strcasecmp(sym, name);
+}
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* _ASM_ARM_FTRACE */
@@ -604,13 +604,12 @@ void flush_cache_range(struct vm_area_struct *vma,
 	if (parisc_requires_coherency())
 		flush_tlb_range(vma, start, end);
 
-	if ((end - start) >= parisc_cache_flush_threshold) {
+	if ((end - start) >= parisc_cache_flush_threshold
+	    || vma->vm_mm->context != mfsp(3)) {
 		flush_cache_all();
 		return;
 	}
 
-	BUG_ON(vma->vm_mm->context != mfsp(3));
-
 	flush_user_dcache_range_asm(start, end);
 	if (vma->vm_flags & VM_EXEC)
 		flush_user_icache_range_asm(start, end);
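The hunk above boils down to a single predicate: fall back to a full cache flush either when the range is large or when the VMA does not belong to the address space currently loaded in space register 3, because per-range flushing is only valid for the live context. A minimal standalone C sketch of that decision (illustrative only; the parameter names are made up, this is not the kernel code):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative model of the check added above: flush everything when the
 * range is big or the mm being flushed is not the currently loaded one. */
bool flush_whole_cache(size_t range_len, size_t flush_threshold,
                       unsigned long vma_context,
                       unsigned long current_context)
{
        return range_len >= flush_threshold || vma_context != current_context;
}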
@ -146,6 +146,19 @@ notrace unsigned int __check_irq_replay(void)
|
|||
|
||||
/* Clear bit 0 which we wouldn't clear otherwise */
|
||||
local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
|
||||
if (happened & PACA_IRQ_HARD_DIS) {
|
||||
/*
|
||||
* We may have missed a decrementer interrupt if hard disabled.
|
||||
* Check the decrementer register in case we had a rollover
|
||||
* while hard disabled.
|
||||
*/
|
||||
if (!(happened & PACA_IRQ_DEC)) {
|
||||
if (decrementer_check_overflow()) {
|
||||
local_paca->irq_happened |= PACA_IRQ_DEC;
|
||||
happened |= PACA_IRQ_DEC;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Force the delivery of pending soft-disabled interrupts on PS3.
|
||||
|
@ -171,7 +184,7 @@ notrace unsigned int __check_irq_replay(void)
|
|||
* in case we also had a rollover while hard disabled
|
||||
*/
|
||||
local_paca->irq_happened &= ~PACA_IRQ_DEC;
|
||||
if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
|
||||
if (happened & PACA_IRQ_DEC)
|
||||
return 0x900;
|
||||
|
||||
/* Finally check if an external interrupt happened */
|
||||
|
|
|
@ -127,12 +127,19 @@ static void flush_tmregs_to_thread(struct task_struct *tsk)
|
|||
* If task is not current, it will have been flushed already to
|
||||
* it's thread_struct during __switch_to().
|
||||
*
|
||||
* A reclaim flushes ALL the state.
|
||||
* A reclaim flushes ALL the state or if not in TM save TM SPRs
|
||||
* in the appropriate thread structures from live.
|
||||
*/
|
||||
|
||||
if (tsk == current && MSR_TM_SUSPENDED(mfmsr()))
|
||||
tm_reclaim_current(TM_CAUSE_SIGNAL);
|
||||
if (tsk != current)
|
||||
return;
|
||||
|
||||
if (MSR_TM_SUSPENDED(mfmsr())) {
|
||||
tm_reclaim_current(TM_CAUSE_SIGNAL);
|
||||
} else {
|
||||
tm_enable();
|
||||
tm_save_sprs(&(tsk->thread));
|
||||
}
|
||||
}
|
||||
#else
|
||||
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
|
||||
|
|
|
@@ -54,6 +54,7 @@ extern struct trap_per_cpu trap_block[NR_CPUS];
 void init_cur_cpu_trap(struct thread_info *);
 void setup_tba(void);
 extern int ncpus_probed;
+extern u64 cpu_mondo_counter[NR_CPUS];
 
 unsigned long real_hard_smp_processor_id(void);
 
@ -621,22 +621,48 @@ retry:
|
|||
}
|
||||
}
|
||||
|
||||
/* Multi-cpu list version. */
|
||||
#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
|
||||
#define MONDO_USEC_WAIT_MIN 2
|
||||
#define MONDO_USEC_WAIT_MAX 100
|
||||
#define MONDO_RETRY_LIMIT 500000
|
||||
|
||||
/* Multi-cpu list version.
|
||||
*
|
||||
* Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
|
||||
* Sometimes not all cpus receive the mondo, requiring us to re-send
|
||||
* the mondo until all cpus have received, or cpus are truly stuck
|
||||
* unable to receive mondo, and we timeout.
|
||||
* Occasionally a target cpu strand is borrowed briefly by hypervisor to
|
||||
* perform guest service, such as PCIe error handling. Consider the
|
||||
* service time, 1 second overall wait is reasonable for 1 cpu.
|
||||
* Here two in-between mondo check wait time are defined: 2 usec for
|
||||
* single cpu quick turn around and up to 100usec for large cpu count.
|
||||
* Deliver mondo to large number of cpus could take longer, we adjusts
|
||||
* the retry count as long as target cpus are making forward progress.
|
||||
*/
|
||||
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
|
||||
{
|
||||
int retries, this_cpu, prev_sent, i, saw_cpu_error;
|
||||
int this_cpu, tot_cpus, prev_sent, i, rem;
|
||||
int usec_wait, retries, tot_retries;
|
||||
u16 first_cpu = 0xffff;
|
||||
unsigned long xc_rcvd = 0;
|
||||
unsigned long status;
|
||||
int ecpuerror_id = 0;
|
||||
int enocpu_id = 0;
|
||||
u16 *cpu_list;
|
||||
u16 cpu;
|
||||
|
||||
this_cpu = smp_processor_id();
|
||||
|
||||
cpu_list = __va(tb->cpu_list_pa);
|
||||
|
||||
saw_cpu_error = 0;
|
||||
retries = 0;
|
||||
usec_wait = cnt * MONDO_USEC_WAIT_MIN;
|
||||
if (usec_wait > MONDO_USEC_WAIT_MAX)
|
||||
usec_wait = MONDO_USEC_WAIT_MAX;
|
||||
retries = tot_retries = 0;
|
||||
tot_cpus = cnt;
|
||||
prev_sent = 0;
|
||||
|
||||
do {
|
||||
int forward_progress, n_sent;
|
||||
int n_sent, mondo_delivered, target_cpu_busy;
|
||||
|
||||
status = sun4v_cpu_mondo_send(cnt,
|
||||
tb->cpu_list_pa,
|
||||
|
@ -644,94 +670,113 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
|
|||
|
||||
/* HV_EOK means all cpus received the xcall, we're done. */
|
||||
if (likely(status == HV_EOK))
|
||||
break;
|
||||
goto xcall_done;
|
||||
|
||||
/* If not these non-fatal errors, panic */
|
||||
if (unlikely((status != HV_EWOULDBLOCK) &&
|
||||
(status != HV_ECPUERROR) &&
|
||||
(status != HV_ENOCPU)))
|
||||
goto fatal_errors;
|
||||
|
||||
/* First, see if we made any forward progress.
|
||||
*
|
||||
* Go through the cpu_list, count the target cpus that have
|
||||
* received our mondo (n_sent), and those that did not (rem).
|
||||
* Re-pack cpu_list with the cpus remain to be retried in the
|
||||
* front - this simplifies tracking the truly stalled cpus.
|
||||
*
|
||||
* The hypervisor indicates successful sends by setting
|
||||
* cpu list entries to the value 0xffff.
|
||||
*
|
||||
* EWOULDBLOCK means some target cpus did not receive the
|
||||
* mondo and retry usually helps.
|
||||
*
|
||||
* ECPUERROR means at least one target cpu is in error state,
|
||||
* it's usually safe to skip the faulty cpu and retry.
|
||||
*
|
||||
* ENOCPU means one of the target cpu doesn't belong to the
|
||||
* domain, perhaps offlined which is unexpected, but not
|
||||
* fatal and it's okay to skip the offlined cpu.
|
||||
*/
|
||||
rem = 0;
|
||||
n_sent = 0;
|
||||
for (i = 0; i < cnt; i++) {
|
||||
if (likely(cpu_list[i] == 0xffff))
|
||||
cpu = cpu_list[i];
|
||||
if (likely(cpu == 0xffff)) {
|
||||
n_sent++;
|
||||
} else if ((status == HV_ECPUERROR) &&
|
||||
(sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
|
||||
ecpuerror_id = cpu + 1;
|
||||
} else if (status == HV_ENOCPU && !cpu_online(cpu)) {
|
||||
enocpu_id = cpu + 1;
|
||||
} else {
|
||||
cpu_list[rem++] = cpu;
|
||||
}
|
||||
}
|
||||
|
||||
forward_progress = 0;
|
||||
if (n_sent > prev_sent)
|
||||
forward_progress = 1;
|
||||
/* No cpu remained, we're done. */
|
||||
if (rem == 0)
|
||||
break;
|
||||
|
||||
/* Otherwise, update the cpu count for retry. */
|
||||
cnt = rem;
|
||||
|
||||
/* Record the overall number of mondos received by the
|
||||
* first of the remaining cpus.
|
||||
*/
|
||||
if (first_cpu != cpu_list[0]) {
|
||||
first_cpu = cpu_list[0];
|
||||
xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
|
||||
}
|
||||
|
||||
/* Was any mondo delivered successfully? */
|
||||
mondo_delivered = (n_sent > prev_sent);
|
||||
prev_sent = n_sent;
|
||||
|
||||
/* If we get a HV_ECPUERROR, then one or more of the cpus
|
||||
* in the list are in error state. Use the cpu_state()
|
||||
* hypervisor call to find out which cpus are in error state.
|
||||
/* or, was any target cpu busy processing other mondos? */
|
||||
target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
|
||||
xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
|
||||
|
||||
/* Retry count is for no progress. If we're making progress,
|
||||
* reset the retry count.
|
||||
*/
|
||||
if (unlikely(status == HV_ECPUERROR)) {
|
||||
for (i = 0; i < cnt; i++) {
|
||||
long err;
|
||||
u16 cpu;
|
||||
|
||||
cpu = cpu_list[i];
|
||||
if (cpu == 0xffff)
|
||||
continue;
|
||||
|
||||
err = sun4v_cpu_state(cpu);
|
||||
if (err == HV_CPU_STATE_ERROR) {
|
||||
saw_cpu_error = (cpu + 1);
|
||||
cpu_list[i] = 0xffff;
|
||||
}
|
||||
}
|
||||
} else if (unlikely(status != HV_EWOULDBLOCK))
|
||||
goto fatal_mondo_error;
|
||||
|
||||
/* Don't bother rewriting the CPU list, just leave the
|
||||
* 0xffff and non-0xffff entries in there and the
|
||||
* hypervisor will do the right thing.
|
||||
*
|
||||
* Only advance timeout state if we didn't make any
|
||||
* forward progress.
|
||||
*/
|
||||
if (unlikely(!forward_progress)) {
|
||||
if (unlikely(++retries > 10000))
|
||||
if (likely(mondo_delivered || target_cpu_busy)) {
|
||||
tot_retries += retries;
|
||||
retries = 0;
|
||||
} else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
|
||||
goto fatal_mondo_timeout;
|
||||
|
||||
/* Delay a little bit to let other cpus catch up
|
||||
* on their cpu mondo queue work.
|
||||
*/
|
||||
udelay(2 * cnt);
|
||||
}
|
||||
|
||||
/* Delay a little bit to let other cpus catch up on
|
||||
* their cpu mondo queue work.
|
||||
*/
|
||||
if (!mondo_delivered)
|
||||
udelay(usec_wait);
|
||||
|
||||
retries++;
|
||||
} while (1);
|
||||
|
||||
if (unlikely(saw_cpu_error))
|
||||
goto fatal_mondo_cpu_error;
|
||||
|
||||
xcall_done:
|
||||
if (unlikely(ecpuerror_id > 0)) {
|
||||
pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
|
||||
this_cpu, ecpuerror_id - 1);
|
||||
} else if (unlikely(enocpu_id > 0)) {
|
||||
pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
|
||||
this_cpu, enocpu_id - 1);
|
||||
}
|
||||
return;
|
||||
|
||||
fatal_mondo_cpu_error:
|
||||
printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
|
||||
"(including %d) were in error state\n",
|
||||
this_cpu, saw_cpu_error - 1);
|
||||
return;
|
||||
fatal_errors:
|
||||
/* fatal errors include bad alignment, etc */
|
||||
pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
|
||||
this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
|
||||
panic("Unexpected SUN4V mondo error %lu\n", status);
|
||||
|
||||
fatal_mondo_timeout:
|
||||
printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
|
||||
" progress after %d retries.\n",
|
||||
this_cpu, retries);
|
||||
goto dump_cpu_list_and_out;
|
||||
|
||||
fatal_mondo_error:
|
||||
printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
|
||||
this_cpu, status);
|
||||
printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
|
||||
"mondo_block_pa(%lx)\n",
|
||||
this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
|
||||
|
||||
dump_cpu_list_and_out:
|
||||
printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
|
||||
for (i = 0; i < cnt; i++)
|
||||
printk("%u ", cpu_list[i]);
|
||||
printk("]\n");
|
||||
/* some cpus being non-responsive to the cpu mondo */
|
||||
pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
|
||||
this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
|
||||
panic("SUN4V mondo timeout panic\n");
|
||||
}
|
||||
|
||||
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
|
||||
|
|
|
@ -26,6 +26,21 @@ sun4v_cpu_mondo:
|
|||
ldxa [%g0] ASI_SCRATCHPAD, %g4
|
||||
sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
|
||||
|
||||
/* Get smp_processor_id() into %g3 */
|
||||
sethi %hi(trap_block), %g5
|
||||
or %g5, %lo(trap_block), %g5
|
||||
sub %g4, %g5, %g3
|
||||
srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
|
||||
|
||||
/* Increment cpu_mondo_counter[smp_processor_id()] */
|
||||
sethi %hi(cpu_mondo_counter), %g5
|
||||
or %g5, %lo(cpu_mondo_counter), %g5
|
||||
sllx %g3, 3, %g3
|
||||
add %g5, %g3, %g5
|
||||
ldx [%g5], %g3
|
||||
add %g3, 1, %g3
|
||||
stx %g3, [%g5]
|
||||
|
||||
/* Get CPU mondo queue base phys address into %g7. */
|
||||
ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
|
||||
|
||||
|
|
|
@@ -2732,6 +2732,7 @@ void do_getpsr(struct pt_regs *regs)
 	}
 }
 
+u64 cpu_mondo_counter[NR_CPUS] = {0};
 struct trap_per_cpu trap_block[NR_CPUS];
 EXPORT_SYMBOL(trap_block);
 
@@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
 ENTRY(U3_retl_o2_and_7_plus_GS)
 	and	%o2, 7, %o2
 	retl
-	add	%o2, GLOBAL_SPARE, %o2
+	add	%o2, GLOBAL_SPARE, %o0
 ENDPROC(U3_retl_o2_and_7_plus_GS)
 ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
 	add	GLOBAL_SPARE, 8, GLOBAL_SPARE
 	and	%o2, 7, %o2
 	retl
-	add	%o2, GLOBAL_SPARE, %o2
+	add	%o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
 #endif
 
@@ -14,6 +14,7 @@
 
 #include <linux/types.h>
 #include "ctype.h"
+#include "string.h"
 
 int memcmp(const void *s1, const void *s2, size_t len)
 {
|
|
@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
 #define memset(d,c,l) __builtin_memset(d,c,l)
 #define memcmp	__builtin_memcmp
 
+extern int strcmp(const char *str1, const char *str2);
+extern int strncmp(const char *cs, const char *ct, size_t count);
+extern size_t strlen(const char *s);
+extern char *strstr(const char *s1, const char *s2);
+extern size_t strnlen(const char *s, size_t maxlen);
+extern unsigned int atou(const char *s);
+extern unsigned long long simple_strtoull(const char *cp, char **endp,
+					  unsigned int base);
+
 #endif /* BOOT_STRING_H */
|
@@ -152,6 +152,8 @@ void kvm_async_pf_task_wait(u32 token)
 		if (hlist_unhashed(&n.link))
 			break;
 
+		rcu_irq_exit();
+
 		if (!n.halted) {
 			local_irq_enable();
 			schedule();
@@ -160,11 +162,11 @@ void kvm_async_pf_task_wait(u32 token)
 			/*
 			 * We cannot reschedule. So halt.
 			 */
-			rcu_irq_exit();
 			native_safe_halt();
 			local_irq_disable();
-			rcu_irq_enter();
 		}
+
+		rcu_irq_enter();
 	}
 	if (!n.halted)
 		finish_swait(&n.wq, &wait);
@@ -2971,10 +2971,12 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
 {
 	if (!sata_pmp_attached(ap)) {
-		if (likely(devno < ata_link_max_devices(&ap->link)))
+		if (likely(devno >= 0 &&
+			   devno < ata_link_max_devices(&ap->link)))
 			return &ap->link.device[devno];
 	} else {
-		if (likely(devno < ap->nr_pmp_links))
+		if (likely(devno >= 0 &&
+			   devno < ap->nr_pmp_links))
 			return &ap->pmp_link[devno].device[0];
 	}
 
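The change above guards against a negative devno reaching the array index (the "array underflow" from the changelog). A tiny hedged sketch of the same bounds check in plain C (hypothetical names, not the libata code):

#include <stddef.h>

struct device_slot { int id; };

/* Illustrative: reject indexes below zero as well as past the end before
 * touching the array, mirroring the "devno >= 0 &&" check added above. */
struct device_slot *find_slot(struct device_slot *slots, size_t nr_slots,
                              int index)
{
        if (index >= 0 && (size_t)index < nr_slots)
                return &slots[index];
        return NULL;
}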
@@ -182,11 +182,12 @@ static int pset_prop_read_string(struct property_set *pset,
 	return 0;
 }
 
-static inline struct fwnode_handle *dev_fwnode(struct device *dev)
+struct fwnode_handle *dev_fwnode(struct device *dev)
 {
 	return IS_ENABLED(CONFIG_OF) && dev->of_node ?
 		&dev->of_node->fwnode : dev->fwnode;
 }
+EXPORT_SYMBOL_GPL(dev_fwnode);
 
 /**
  * device_property_present - check if a property of a device is present
@@ -929,6 +929,7 @@ static int __init nbd_init(void)
 		return -ENOMEM;
 
 	for (i = 0; i < nbds_max; i++) {
+		struct request_queue *q;
 		struct gendisk *disk = alloc_disk(1 << part_shift);
 		if (!disk)
 			goto out;
@@ -954,12 +955,13 @@ static int __init nbd_init(void)
 		 * every gendisk to have its very own request_queue struct.
 		 * These structs are big so we dynamically allocate them.
 		 */
-		disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
-		if (!disk->queue) {
+		q = blk_mq_init_queue(&nbd_dev[i].tag_set);
+		if (IS_ERR(q)) {
 			blk_mq_free_tag_set(&nbd_dev[i].tag_set);
 			put_disk(disk);
 			goto out;
 		}
+		disk->queue = q;
 
 		/*
 		 * Tell the block layer that we are not a rotational device
@@ -630,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
 	if (err)
 		goto out_put_disk;
 
-	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
+	q = blk_mq_init_queue(&vblk->tag_set);
 	if (IS_ERR(q)) {
 		err = -ENOMEM;
 		goto out_free_tags;
 	}
+	vblk->disk->queue = q;
 
 	q->queuedata = vblk;
 
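Both hunks above fix the same misuse: blk_mq_init_queue() reports failure through an ERR_PTR-encoded pointer, so testing the result against NULL misses errors. A small self-contained sketch of the ERR_PTR/IS_ERR convention (a simplified re-implementation for illustration only, not the kernel headers; init_queue() is a made-up stand-in):

#include <stdio.h>
#include <errno.h>

/* Simplified versions of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers:
 * small negative errno values are folded into the top of the pointer range. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *init_queue(int fail)       /* stand-in for blk_mq_init_queue() */
{
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
        void *q = init_queue(1);

        if (IS_ERR(q))                  /* a NULL check would miss this */
                printf("init failed: %ld\n", PTR_ERR(q));
        return 0;
}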
@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
|
|||
GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
|
||||
GATE_BUS_TOP, 24, 0, 0),
|
||||
GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
|
||||
GATE_BUS_TOP, 27, 0, 0),
|
||||
GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
|
||||
};
|
||||
|
||||
static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
|
||||
|
@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
|
|||
GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
|
||||
|
||||
GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
|
||||
GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
|
||||
GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
|
||||
GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
|
||||
GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
|
||||
|
||||
GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
|
||||
GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
|
||||
GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
|
||||
GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
|
||||
GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
|
||||
GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
|
||||
GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
|
||||
GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
|
||||
GATE_BUS_TOP, 5, 0, 0),
|
||||
GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
|
||||
GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
|
||||
GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
|
||||
GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
|
||||
GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
|
||||
GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
|
||||
|
@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
|
|||
GATE(0, "aclk166", "mout_user_aclk166",
|
||||
GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
|
||||
GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
|
||||
GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
|
||||
GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
|
||||
GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
|
||||
GATE_BUS_TOP, 16, 0, 0),
|
||||
GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
|
||||
GATE_BUS_TOP, 17, 0, 0),
|
||||
GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
|
||||
GATE_BUS_TOP, 18, 0, 0),
|
||||
GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
|
||||
GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
|
||||
GATE_BUS_TOP, 28, 0, 0),
|
||||
GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
|
||||
GATE_BUS_TOP, 29, 0, 0),
|
||||
|
||||
GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
|
||||
SRC_MASK_TOP2, 24, 0, 0),
|
||||
SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
|
||||
|
||||
GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
|
||||
SRC_MASK_TOP7, 20, 0, 0),
|
||||
|
|
|
@@ -703,24 +703,23 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
 {
 	struct lineevent_state *le = p;
 	struct gpioevent_data ge;
-	int ret;
+	int ret, level;
 
 	ge.timestamp = ktime_get_real_ns();
+	level = gpiod_get_value_cansleep(le->desc);
 
 	if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
 	    && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
-		int level = gpiod_get_value_cansleep(le->desc);
-
 		if (level)
 			/* Emit low-to-high event */
 			ge.id = GPIOEVENT_EVENT_RISING_EDGE;
 		else
 			/* Emit high-to-low event */
 			ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
-	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE) {
+	} else if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE && level) {
 		/* Emit low-to-high event */
 		ge.id = GPIOEVENT_EVENT_RISING_EDGE;
-	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
+	} else if (le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE && !level) {
 		/* Emit high-to-low event */
 		ge.id = GPIOEVENT_EVENT_FALLING_EDGE;
 	} else {
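The reworked handler samples the line once and then only reports an edge that was actually requested and matches the sampled level, instead of translating an unwanted edge into the opposite one. A standalone sketch of that filtering decision (hypothetical helper in plain C, not the gpiolib code):

#include <stdbool.h>

enum edge { EDGE_NONE, EDGE_RISING, EDGE_FALLING };

/* Illustrative: decide which event (if any) to emit, given the edges the
 * caller asked for and the level sampled when the interrupt fired. */
enum edge classify_event(bool want_rising, bool want_falling, bool level)
{
        if (want_rising && want_falling)
                return level ? EDGE_RISING : EDGE_FALLING;
        if (want_rising && level)
                return EDGE_RISING;
        if (want_falling && !level)
                return EDGE_FALLING;
        return EDGE_NONE;       /* unwanted edge: skip it, don't re-label it */
}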
@@ -1301,6 +1301,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
 		amdgpu_program_register_sequence(adev,
 						 pitcairn_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(pitcairn_mgcg_cgcg_init));
+		break;
 	case CHIP_VERDE:
 		amdgpu_program_register_sequence(adev,
 						 verde_golden_registers,
@@ -1325,6 +1326,7 @@ static void si_init_golden_registers(struct amdgpu_device *adev)
 		amdgpu_program_register_sequence(adev,
 						 oland_mgcg_cgcg_init,
 						 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
+		break;
 	case CHIP_HAINAN:
 		amdgpu_program_register_sequence(adev,
 						 hainan_golden_registers,
@@ -337,7 +337,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
 	info->fbops = &virtio_gpufb_ops;
 	info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
-	info->screen_base = obj->vmap;
+	info->screen_buffer = obj->vmap;
 	info->screen_size = obj->gem_base.size;
 	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 	drm_fb_helper_fill_var(info, &vfbdev->helper,
@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
|
|||
skb_trim(skb, dlen);
|
||||
mutex_lock(&ep->com.mutex);
|
||||
|
||||
/* update RX credits */
|
||||
update_rx_credits(ep, dlen);
|
||||
|
||||
switch (ep->com.state) {
|
||||
case MPA_REQ_SENT:
|
||||
update_rx_credits(ep, dlen);
|
||||
ep->rcv_seq += dlen;
|
||||
disconnect = process_mpa_reply(ep, skb);
|
||||
break;
|
||||
case MPA_REQ_WAIT:
|
||||
update_rx_credits(ep, dlen);
|
||||
ep->rcv_seq += dlen;
|
||||
disconnect = process_mpa_request(ep, skb);
|
||||
break;
|
||||
case FPDU_MODE: {
|
||||
struct c4iw_qp_attributes attrs;
|
||||
|
||||
update_rx_credits(ep, dlen);
|
||||
BUG_ON(!ep->com.qp);
|
||||
if (status)
|
||||
pr_err("%s Unexpected streaming data." \
|
||||
|
|
|
@@ -4294,6 +4294,7 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
 	/* Setting */
 	irte->hi.fields.ga_root_ptr = (pi_data->base >> 12);
 	irte->hi.fields.vector = vcpu_pi_info->vector;
+	irte->lo.fields_vapic.ga_log_intr = 1;
 	irte->lo.fields_vapic.guest_mode = 1;
 	irte->lo.fields_vapic.ga_tag = pi_data->ga_tag;
 
@ -393,11 +393,11 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
|
|||
msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
|
||||
msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
|
||||
msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
|
||||
memcpy(msg, &msg_tmp, sizeof(*msg));
|
||||
|
||||
/* No need to update the read positions, because this was a peek */
|
||||
/* If the caller specifically want to peek, return */
|
||||
if (peekonly) {
|
||||
memcpy(msg, &msg_tmp, sizeof(*msg));
|
||||
goto peekout;
|
||||
}
|
||||
|
||||
|
@ -442,21 +442,15 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
|
|||
space_rem = bus->m_dwSizeGetRing - curr_grp;
|
||||
|
||||
if (space_rem < sizeof(*msg)) {
|
||||
/* msg wraps around the ring */
|
||||
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
|
||||
memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
|
||||
sizeof(*msg) - space_rem);
|
||||
if (buf)
|
||||
memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
|
||||
space_rem, buf_size);
|
||||
|
||||
} else if (space_rem == sizeof(*msg)) {
|
||||
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
|
||||
if (buf)
|
||||
memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
|
||||
} else {
|
||||
/* Additional data wraps around the ring */
|
||||
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
|
||||
if (buf) {
|
||||
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
|
||||
sizeof(*msg), space_rem - sizeof(*msg));
|
||||
|
@ -469,15 +463,10 @@ int saa7164_bus_get(struct saa7164_dev *dev, struct tmComResInfo* msg,
|
|||
|
||||
} else {
|
||||
/* No wrapping */
|
||||
memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
|
||||
if (buf)
|
||||
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
|
||||
buf_size);
|
||||
}
|
||||
/* Convert from little endian to CPU */
|
||||
msg->size = le16_to_cpu((__force __le16)msg->size);
|
||||
msg->command = le32_to_cpu((__force __le32)msg->command);
|
||||
msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
|
||||
|
||||
/* Update the read positions, adjusting the ring */
|
||||
saa7164_writel(bus->m_dwGetReadPos, new_grp);
|
||||
|
|
|
@ -1725,27 +1725,9 @@ static long vpfe_param_handler(struct file *file, void *priv,
|
|||
|
||||
switch (cmd) {
|
||||
case VPFE_CMD_S_CCDC_RAW_PARAMS:
|
||||
v4l2_warn(&vpfe_dev->v4l2_dev,
|
||||
"VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
|
||||
if (ccdc_dev->hw_ops.set_params) {
|
||||
ret = ccdc_dev->hw_ops.set_params(param);
|
||||
if (ret) {
|
||||
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
|
||||
"Error setting parameters in CCDC\n");
|
||||
goto unlock_out;
|
||||
}
|
||||
ret = vpfe_get_ccdc_image_format(vpfe_dev,
|
||||
&vpfe_dev->fmt);
|
||||
if (ret < 0) {
|
||||
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
|
||||
"Invalid image format at CCDC\n");
|
||||
goto unlock_out;
|
||||
}
|
||||
} else {
|
||||
ret = -EINVAL;
|
||||
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
|
||||
v4l2_warn(&vpfe_dev->v4l2_dev,
|
||||
"VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
|
||||
}
|
||||
break;
|
||||
default:
|
||||
ret = -ENOTTY;
|
||||
|
|
|
@@ -254,7 +254,7 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
 		return 0;
 
 	case LIRC_GET_REC_RESOLUTION:
-		val = dev->rx_resolution;
+		val = dev->rx_resolution / 1000;
 		break;
 
 	case LIRC_SET_WIDEBAND_RECEIVER:
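Per the changelog entry, the receiver resolution is kept device-side in nanoseconds while LIRC_GET_REC_RESOLUTION is expected to report microseconds, hence the divide by 1000. A trivial hedged sketch of the conversion (illustrative names only):

#include <stdio.h>

/* Illustrative only: convert a nanosecond resolution to the microsecond
 * value userspace expects from the ioctl. */
static unsigned int rx_resolution_us(unsigned int resolution_ns)
{
        return resolution_ns / 1000;
}

int main(void)
{
        printf("%u us\n", rx_resolution_us(500000));    /* prints 500 us */
        return 0;
}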
@ -177,19 +177,17 @@ static void mmc_retune_timer(unsigned long data)
|
|||
*/
|
||||
int mmc_of_parse(struct mmc_host *host)
|
||||
{
|
||||
struct device_node *np;
|
||||
struct device *dev = host->parent;
|
||||
u32 bus_width;
|
||||
int ret;
|
||||
bool cd_cap_invert, cd_gpio_invert = false;
|
||||
bool ro_cap_invert, ro_gpio_invert = false;
|
||||
|
||||
if (!host->parent || !host->parent->of_node)
|
||||
if (!dev || !dev_fwnode(dev))
|
||||
return 0;
|
||||
|
||||
np = host->parent->of_node;
|
||||
|
||||
/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */
|
||||
if (of_property_read_u32(np, "bus-width", &bus_width) < 0) {
|
||||
if (device_property_read_u32(dev, "bus-width", &bus_width) < 0) {
|
||||
dev_dbg(host->parent,
|
||||
"\"bus-width\" property is missing, assuming 1 bit.\n");
|
||||
bus_width = 1;
|
||||
|
@ -211,7 +209,7 @@ int mmc_of_parse(struct mmc_host *host)
|
|||
}
|
||||
|
||||
/* f_max is obtained from the optional "max-frequency" property */
|
||||
of_property_read_u32(np, "max-frequency", &host->f_max);
|
||||
device_property_read_u32(dev, "max-frequency", &host->f_max);
|
||||
|
||||
/*
|
||||
* Configure CD and WP pins. They are both by default active low to
|
||||
|
@ -226,12 +224,12 @@ int mmc_of_parse(struct mmc_host *host)
|
|||
*/
|
||||
|
||||
/* Parse Card Detection */
|
||||
if (of_property_read_bool(np, "non-removable")) {
|
||||
if (device_property_read_bool(dev, "non-removable")) {
|
||||
host->caps |= MMC_CAP_NONREMOVABLE;
|
||||
} else {
|
||||
cd_cap_invert = of_property_read_bool(np, "cd-inverted");
|
||||
cd_cap_invert = device_property_read_bool(dev, "cd-inverted");
|
||||
|
||||
if (of_property_read_bool(np, "broken-cd"))
|
||||
if (device_property_read_bool(dev, "broken-cd"))
|
||||
host->caps |= MMC_CAP_NEEDS_POLL;
|
||||
|
||||
ret = mmc_gpiod_request_cd(host, "cd", 0, true,
|
||||
|
@ -257,7 +255,7 @@ int mmc_of_parse(struct mmc_host *host)
|
|||
}
|
||||
|
||||
/* Parse Write Protection */
|
||||
ro_cap_invert = of_property_read_bool(np, "wp-inverted");
|
||||
ro_cap_invert = device_property_read_bool(dev, "wp-inverted");
|
||||
|
||||
ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
|
||||
if (!ret)
|
||||
|
@ -265,62 +263,62 @@ int mmc_of_parse(struct mmc_host *host)
|
|||
else if (ret != -ENOENT && ret != -ENOSYS)
|
||||
return ret;
|
||||
|
||||
if (of_property_read_bool(np, "disable-wp"))
|
||||
if (device_property_read_bool(dev, "disable-wp"))
|
||||
host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
|
||||
|
||||
/* See the comment on CD inversion above */
|
||||
if (ro_cap_invert ^ ro_gpio_invert)
|
||||
host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
|
||||
|
||||
if (of_property_read_bool(np, "cap-sd-highspeed"))
|
||||
if (device_property_read_bool(dev, "cap-sd-highspeed"))
|
||||
host->caps |= MMC_CAP_SD_HIGHSPEED;
|
||||
if (of_property_read_bool(np, "cap-mmc-highspeed"))
|
||||
if (device_property_read_bool(dev, "cap-mmc-highspeed"))
|
||||
host->caps |= MMC_CAP_MMC_HIGHSPEED;
|
||||
if (of_property_read_bool(np, "sd-uhs-sdr12"))
|
||||
if (device_property_read_bool(dev, "sd-uhs-sdr12"))
|
||||
host->caps |= MMC_CAP_UHS_SDR12;
|
||||
if (of_property_read_bool(np, "sd-uhs-sdr25"))
|
||||
if (device_property_read_bool(dev, "sd-uhs-sdr25"))
|
||||
host->caps |= MMC_CAP_UHS_SDR25;
|
||||
if (of_property_read_bool(np, "sd-uhs-sdr50"))
|
||||
if (device_property_read_bool(dev, "sd-uhs-sdr50"))
|
||||
host->caps |= MMC_CAP_UHS_SDR50;
|
||||
if (of_property_read_bool(np, "sd-uhs-sdr104"))
|
||||
if (device_property_read_bool(dev, "sd-uhs-sdr104"))
|
||||
host->caps |= MMC_CAP_UHS_SDR104;
|
||||
if (of_property_read_bool(np, "sd-uhs-ddr50"))
|
||||
if (device_property_read_bool(dev, "sd-uhs-ddr50"))
|
||||
host->caps |= MMC_CAP_UHS_DDR50;
|
||||
if (of_property_read_bool(np, "cap-power-off-card"))
|
||||
if (device_property_read_bool(dev, "cap-power-off-card"))
|
||||
host->caps |= MMC_CAP_POWER_OFF_CARD;
|
||||
if (of_property_read_bool(np, "cap-mmc-hw-reset"))
|
||||
if (device_property_read_bool(dev, "cap-mmc-hw-reset"))
|
||||
host->caps |= MMC_CAP_HW_RESET;
|
||||
if (of_property_read_bool(np, "cap-sdio-irq"))
|
||||
if (device_property_read_bool(dev, "cap-sdio-irq"))
|
||||
host->caps |= MMC_CAP_SDIO_IRQ;
|
||||
if (of_property_read_bool(np, "full-pwr-cycle"))
|
||||
if (device_property_read_bool(dev, "full-pwr-cycle"))
|
||||
host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE;
|
||||
if (of_property_read_bool(np, "keep-power-in-suspend"))
|
||||
if (device_property_read_bool(dev, "keep-power-in-suspend"))
|
||||
host->pm_caps |= MMC_PM_KEEP_POWER;
|
||||
if (of_property_read_bool(np, "wakeup-source") ||
|
||||
of_property_read_bool(np, "enable-sdio-wakeup")) /* legacy */
|
||||
if (device_property_read_bool(dev, "wakeup-source") ||
|
||||
device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
|
||||
host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
|
||||
if (of_property_read_bool(np, "mmc-ddr-1_8v"))
|
||||
if (device_property_read_bool(dev, "mmc-ddr-1_8v"))
|
||||
host->caps |= MMC_CAP_1_8V_DDR;
|
||||
if (of_property_read_bool(np, "mmc-ddr-1_2v"))
|
||||
if (device_property_read_bool(dev, "mmc-ddr-1_2v"))
|
||||
host->caps |= MMC_CAP_1_2V_DDR;
|
||||
if (of_property_read_bool(np, "mmc-hs200-1_8v"))
|
||||
if (device_property_read_bool(dev, "mmc-hs200-1_8v"))
|
||||
host->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
|
||||
if (of_property_read_bool(np, "mmc-hs200-1_2v"))
|
||||
if (device_property_read_bool(dev, "mmc-hs200-1_2v"))
|
||||
host->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
|
||||
if (of_property_read_bool(np, "mmc-hs400-1_8v"))
|
||||
if (device_property_read_bool(dev, "mmc-hs400-1_8v"))
|
||||
host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR;
|
||||
if (of_property_read_bool(np, "mmc-hs400-1_2v"))
|
||||
if (device_property_read_bool(dev, "mmc-hs400-1_2v"))
|
||||
host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR;
|
||||
if (of_property_read_bool(np, "mmc-hs400-enhanced-strobe"))
|
||||
if (device_property_read_bool(dev, "mmc-hs400-enhanced-strobe"))
|
||||
host->caps2 |= MMC_CAP2_HS400_ES;
|
||||
if (of_property_read_bool(np, "no-sdio"))
|
||||
if (device_property_read_bool(dev, "no-sdio"))
|
||||
host->caps2 |= MMC_CAP2_NO_SDIO;
|
||||
if (of_property_read_bool(np, "no-sd"))
|
||||
if (device_property_read_bool(dev, "no-sd"))
|
||||
host->caps2 |= MMC_CAP2_NO_SD;
|
||||
if (of_property_read_bool(np, "no-mmc"))
|
||||
if (device_property_read_bool(dev, "no-mmc"))
|
||||
host->caps2 |= MMC_CAP2_NO_MMC;
|
||||
|
||||
host->dsr_req = !of_property_read_u32(np, "dsr", &host->dsr);
|
||||
host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr);
|
||||
if (host->dsr_req && (host->dsr & ~0xffff)) {
|
||||
dev_err(host->parent,
|
||||
"device tree specified broken value for DSR: 0x%x, ignoring\n",
|
||||
|
|
|
@ -1704,7 +1704,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
|
|||
err = mmc_select_hs400(card);
|
||||
if (err)
|
||||
goto free_card;
|
||||
} else {
|
||||
} else if (!mmc_card_hs400es(card)) {
|
||||
/* Select the desired bus width optionally */
|
||||
err = mmc_select_bus_width(card);
|
||||
if (err > 0 && mmc_card_hs(card)) {
|
||||
|
|
|
@ -2610,8 +2610,8 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
|
|||
host->slot[id] = slot;
|
||||
|
||||
mmc->ops = &dw_mci_ops;
|
||||
if (of_property_read_u32_array(host->dev->of_node,
|
||||
"clock-freq-min-max", freq, 2)) {
|
||||
if (device_property_read_u32_array(host->dev, "clock-freq-min-max",
|
||||
freq, 2)) {
|
||||
mmc->f_min = DW_MCI_FREQ_MIN;
|
||||
mmc->f_max = DW_MCI_FREQ_MAX;
|
||||
} else {
|
||||
|
@ -2709,7 +2709,6 @@ static void dw_mci_init_dma(struct dw_mci *host)
|
|||
{
|
||||
int addr_config;
|
||||
struct device *dev = host->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
|
||||
/*
|
||||
* Check tansfer mode from HCON[17:16]
|
||||
|
@ -2770,8 +2769,9 @@ static void dw_mci_init_dma(struct dw_mci *host)
|
|||
dev_info(host->dev, "Using internal DMA controller.\n");
|
||||
} else {
|
||||
/* TRANS_MODE_EDMAC: check dma bindings again */
|
||||
if ((of_property_count_strings(np, "dma-names") < 0) ||
|
||||
(!of_find_property(np, "dmas", NULL))) {
|
||||
if ((device_property_read_string_array(dev, "dma-names",
|
||||
NULL, 0) < 0) ||
|
||||
!device_property_present(dev, "dmas")) {
|
||||
goto no_dma;
|
||||
}
|
||||
host->dma_ops = &dw_mci_edmac_ops;
|
||||
|
@ -2931,7 +2931,6 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
|
|||
{
|
||||
struct dw_mci_board *pdata;
|
||||
struct device *dev = host->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
const struct dw_mci_drv_data *drv_data = host->drv_data;
|
||||
int ret;
|
||||
u32 clock_frequency;
|
||||
|
@ -2948,15 +2947,16 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
|
|||
}
|
||||
|
||||
/* find out number of slots supported */
|
||||
of_property_read_u32(np, "num-slots", &pdata->num_slots);
|
||||
device_property_read_u32(dev, "num-slots", &pdata->num_slots);
|
||||
|
||||
if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
|
||||
if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
|
||||
dev_info(dev,
|
||||
"fifo-depth property not found, using value of FIFOTH register as default\n");
|
||||
|
||||
of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms);
|
||||
device_property_read_u32(dev, "card-detect-delay",
|
||||
&pdata->detect_delay_ms);
|
||||
|
||||
if (!of_property_read_u32(np, "clock-frequency", &clock_frequency))
|
||||
if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
|
||||
pdata->bus_hz = clock_frequency;
|
||||
|
||||
if (drv_data && drv_data->parse_dt) {
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
|
||||
#define SDMMC_MC1R 0x204
|
||||
#define SDMMC_MC1R_DDR BIT(3)
|
||||
#define SDMMC_MC1R_FCD BIT(7)
|
||||
#define SDMMC_CACR 0x230
|
||||
#define SDMMC_CACR_CAPWREN BIT(0)
|
||||
#define SDMMC_CACR_KEY (0x46 << 8)
|
||||
|
@ -43,6 +44,15 @@ struct sdhci_at91_priv {
|
|||
struct clk *mainck;
|
||||
};
|
||||
|
||||
static void sdhci_at91_set_force_card_detect(struct sdhci_host *host)
|
||||
{
|
||||
u8 mc1r;
|
||||
|
||||
mc1r = readb(host->ioaddr + SDMMC_MC1R);
|
||||
mc1r |= SDMMC_MC1R_FCD;
|
||||
writeb(mc1r, host->ioaddr + SDMMC_MC1R);
|
||||
}
|
||||
|
||||
static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock)
|
||||
{
|
||||
u16 clk;
|
||||
|
@ -112,10 +122,18 @@ void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing)
|
|||
sdhci_set_uhs_signaling(host, timing);
|
||||
}
|
||||
|
||||
static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
|
||||
{
|
||||
sdhci_reset(host, mask);
|
||||
|
||||
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
|
||||
sdhci_at91_set_force_card_detect(host);
|
||||
}
|
||||
|
||||
static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
|
||||
.set_clock = sdhci_at91_set_clock,
|
||||
.set_bus_width = sdhci_set_bus_width,
|
||||
.reset = sdhci_reset,
|
||||
.reset = sdhci_at91_reset,
|
||||
.set_uhs_signaling = sdhci_at91_set_uhs_signaling,
|
||||
.set_power = sdhci_at91_set_power,
|
||||
};
|
||||
|
@ -322,6 +340,21 @@ static int sdhci_at91_probe(struct platform_device *pdev)
|
|||
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the device attached to the MMC bus is not removable, it is safer
|
||||
* to set the Force Card Detect bit. People often don't connect the
|
||||
* card detect signal and use this pin for another purpose. If the card
|
||||
* detect pin is not muxed to SDHCI controller, a default value is
|
||||
* used. This value can be different from a SoC revision to another
|
||||
* one. Problems come when this default value is not card present. To
|
||||
* avoid this case, if the device is non removable then the card
|
||||
* detection procedure using the SDMCC_CD signal is bypassed.
|
||||
* This bit is reset when a software reset for all command is performed
|
||||
* so we need to implement our own reset function to set back this bit.
|
||||
*/
|
||||
if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
|
||||
sdhci_at91_set_force_card_detect(host);
|
||||
|
||||
pm_runtime_put_autosuspend(&pdev->dev);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -1558,6 +1558,7 @@ static const struct b53_chip_data b53_switch_chips[] = {
|
|||
.dev_name = "BCM53125",
|
||||
.vlans = 4096,
|
||||
.enabled_ports = 0xff,
|
||||
.arl_entries = 4,
|
||||
.cpu_port = B53_CPU_PORT,
|
||||
.vta_regs = B53_VTA_REGS,
|
||||
.duplex_reg = B53_DUPLEX_STAT_GE,
|
||||
|
|
|
@ -609,7 +609,7 @@ static void nb8800_mac_config(struct net_device *dev)
|
|||
mac_mode |= HALF_DUPLEX;
|
||||
|
||||
if (gigabit) {
|
||||
if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
|
||||
if (phy_interface_is_rgmii(dev->phydev))
|
||||
mac_mode |= RGMII_MODE;
|
||||
|
||||
mac_mode |= GMAC_MODE;
|
||||
|
@ -1277,11 +1277,10 @@ static int nb8800_tangox_init(struct net_device *dev)
|
|||
break;
|
||||
|
||||
case PHY_INTERFACE_MODE_RGMII:
|
||||
pad_mode = PAD_MODE_RGMII;
|
||||
break;
|
||||
|
||||
case PHY_INTERFACE_MODE_RGMII_ID:
|
||||
case PHY_INTERFACE_MODE_RGMII_RXID:
|
||||
case PHY_INTERFACE_MODE_RGMII_TXID:
|
||||
pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
|
||||
pad_mode = PAD_MODE_RGMII;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
|
|
@@ -8720,11 +8720,14 @@ static void tg3_free_consistent(struct tg3 *tp)
 	tg3_mem_rx_release(tp);
 	tg3_mem_tx_release(tp);
 
+	/* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+	tg3_full_lock(tp, 0);
 	if (tp->hw_stats) {
 		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
 				  tp->hw_stats, tp->stats_mapping);
 		tp->hw_stats = NULL;
 	}
+	tg3_full_unlock(tp);
 }
 
 /*
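The race is closed by taking the same lock the stats reader holds before the statistics block is freed and the pointer cleared. A small pthread-based sketch of that pattern (hypothetical names, userspace C, not the driver code):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct stats { unsigned long rx_packets; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct stats *hw_stats;

/* Reader: only dereferences hw_stats while holding the lock. */
void read_stats(struct stats *out)
{
        pthread_mutex_lock(&lock);
        if (hw_stats)
                memcpy(out, hw_stats, sizeof(*out));
        else
                memset(out, 0, sizeof(*out));
        pthread_mutex_unlock(&lock);
}

/* Teardown: free and clear the pointer under the same lock, so a concurrent
 * reader can never see a freed block -- the pattern the hunk above applies. */
void free_stats(void)
{
        pthread_mutex_lock(&lock);
        free(hw_stats);
        hw_stats = NULL;
        pthread_mutex_unlock(&lock);
}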
@ -770,6 +770,10 @@ static void cb_timeout_handler(struct work_struct *work)
|
|||
mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
|
||||
}
|
||||
|
||||
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
|
||||
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
|
||||
struct mlx5_cmd_msg *msg);
|
||||
|
||||
static void cmd_work_handler(struct work_struct *work)
|
||||
{
|
||||
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
|
||||
|
@ -779,16 +783,27 @@ static void cmd_work_handler(struct work_struct *work)
|
|||
struct mlx5_cmd_layout *lay;
|
||||
struct semaphore *sem;
|
||||
unsigned long flags;
|
||||
int alloc_ret;
|
||||
|
||||
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
|
||||
down(sem);
|
||||
if (!ent->page_queue) {
|
||||
ent->idx = alloc_ent(cmd);
|
||||
if (ent->idx < 0) {
|
||||
alloc_ret = alloc_ent(cmd);
|
||||
if (alloc_ret < 0) {
|
||||
if (ent->callback) {
|
||||
ent->callback(-EAGAIN, ent->context);
|
||||
mlx5_free_cmd_msg(dev, ent->out);
|
||||
free_msg(dev, ent->in);
|
||||
free_cmd(ent);
|
||||
} else {
|
||||
ent->ret = -EAGAIN;
|
||||
complete(&ent->done);
|
||||
}
|
||||
mlx5_core_err(dev, "failed to allocate command entry\n");
|
||||
up(sem);
|
||||
return;
|
||||
}
|
||||
ent->idx = alloc_ret;
|
||||
} else {
|
||||
ent->idx = cmd->max_reg_cmds;
|
||||
spin_lock_irqsave(&cmd->alloc_lock, flags);
|
||||
|
|
|
@ -62,12 +62,14 @@ static void mlx5e_timestamp_overflow(struct work_struct *work)
|
|||
struct delayed_work *dwork = to_delayed_work(work);
|
||||
struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
|
||||
overflow_work);
|
||||
struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
|
||||
unsigned long flags;
|
||||
|
||||
write_lock_irqsave(&tstamp->lock, flags);
|
||||
timecounter_read(&tstamp->clock);
|
||||
write_unlock_irqrestore(&tstamp->lock, flags);
|
||||
schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
|
||||
queue_delayed_work(priv->wq, &tstamp->overflow_work,
|
||||
msecs_to_jiffies(tstamp->overflow_period * 1000));
|
||||
}
|
||||
|
||||
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
|
||||
|
@ -263,7 +265,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
|
|||
|
||||
INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
|
||||
if (tstamp->overflow_period)
|
||||
schedule_delayed_work(&tstamp->overflow_work, 0);
|
||||
queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
|
||||
else
|
||||
mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
|
||||
|
||||
|
|
|
@ -276,7 +276,7 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
|
|||
|
||||
static bool outer_header_zero(u32 *match_criteria)
|
||||
{
|
||||
int size = MLX5_ST_SZ_BYTES(fte_match_param);
|
||||
int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
|
||||
char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
|
||||
outer_headers);
|
||||
|
||||
|
|
|
@ -651,9 +651,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
|
|||
int vport;
|
||||
int err;
|
||||
|
||||
/* disable PF RoCE so missed packets don't go through RoCE steering */
|
||||
mlx5_dev_list_lock();
|
||||
mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||
mlx5_dev_list_unlock();
|
||||
|
||||
err = esw_create_offloads_fdb_table(esw, nvports);
|
||||
if (err)
|
||||
return err;
|
||||
goto create_fdb_err;
|
||||
|
||||
err = esw_create_offloads_table(esw);
|
||||
if (err)
|
||||
|
@ -673,11 +678,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
|
|||
goto err_reps;
|
||||
}
|
||||
|
||||
/* disable PF RoCE so missed packets don't go through RoCE steering */
|
||||
mlx5_dev_list_lock();
|
||||
mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||
mlx5_dev_list_unlock();
|
||||
|
||||
return 0;
|
||||
|
||||
err_reps:
|
||||
|
@ -694,6 +694,13 @@ create_fg_err:
|
|||
|
||||
create_ft_err:
|
||||
esw_destroy_offloads_fdb_table(esw);
|
||||
|
||||
create_fdb_err:
|
||||
/* enable back PF RoCE */
|
||||
mlx5_dev_list_lock();
|
||||
mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||
mlx5_dev_list_unlock();
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -701,11 +708,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
|
|||
{
|
||||
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
|
||||
|
||||
/* enable back PF RoCE */
|
||||
mlx5_dev_list_lock();
|
||||
mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||
mlx5_dev_list_unlock();
|
||||
|
||||
mlx5_eswitch_disable_sriov(esw);
|
||||
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
|
||||
if (err) {
|
||||
|
@ -715,6 +717,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
|
|||
esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
|
||||
}
|
||||
|
||||
/* enable back PF RoCE */
|
||||
mlx5_dev_list_lock();
|
||||
mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
|
||||
mlx5_dev_list_unlock();
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
|
|
|
@ -157,22 +157,17 @@ static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev)
|
|||
static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
|
||||
u8 *port1, u8 *port2)
|
||||
{
|
||||
if (tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) {
|
||||
if (tracker->netdev_state[0].tx_enabled) {
|
||||
*port1 = 1;
|
||||
*port2 = 1;
|
||||
} else {
|
||||
*port1 = 2;
|
||||
*port2 = 2;
|
||||
}
|
||||
} else {
|
||||
*port1 = 1;
|
||||
*port2 = 2;
|
||||
if (!tracker->netdev_state[0].link_up)
|
||||
if (!tracker->netdev_state[0].tx_enabled ||
|
||||
!tracker->netdev_state[0].link_up) {
|
||||
*port1 = 2;
|
||||
else if (!tracker->netdev_state[1].link_up)
|
||||
*port2 = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
if (!tracker->netdev_state[1].tx_enabled ||
|
||||
!tracker->netdev_state[1].link_up)
|
||||
*port2 = 1;
|
||||
}
|
||||
|
||||
static void mlx5_activate_lag(struct mlx5_lag *ldev,
|
||||
|
|
|
@ -574,6 +574,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
|
|||
.rpadir_value = 2 << 16,
|
||||
.no_trimd = 1,
|
||||
.no_ade = 1,
|
||||
.hw_crc = 1,
|
||||
.tsu = 1,
|
||||
.select_mii = 1,
|
||||
.shift_rd0 = 1,
|
||||
|
@ -802,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = {
|
|||
|
||||
.ecsr_value = ECSR_ICD | ECSR_MPD,
|
||||
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
|
||||
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
|
||||
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
|
||||
|
||||
.tx_check = EESR_TC1 | EESR_FTC,
|
||||
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
|
||||
|
@ -832,7 +833,7 @@ static struct sh_eth_cpu_data sh7763_data = {
|
|||
|
||||
.ecsr_value = ECSR_ICD | ECSR_MPD,
|
||||
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
|
||||
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
|
||||
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
|
||||
|
||||
.tx_check = EESR_TC1 | EESR_FTC,
|
||||
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
|
||||
|
|
|
@@ -141,10 +141,20 @@ static int mcs_set_reg(struct mcs_cb *mcs, __u16 reg, __u16 val)
static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
{
struct usb_device *dev = mcs->usbdev;
int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, 0, reg, val, 2,
void *dmabuf;
int ret;

dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
if (!dmabuf)
return -ENOMEM;

ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
MCS_RD_RTYPE, 0, reg, dmabuf, 2,
msecs_to_jiffies(MCS_CTRL_TIMEOUT));

memcpy(val, dmabuf, sizeof(__u16));
kfree(dmabuf);

return ret;
}
@@ -29,6 +29,7 @@
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
#define DP83867_CTRL 0x1f
#define DP83867_CFG3 0x1e

/* Extended Registers */
#define DP83867_RGMIICTL 0x0032
@@ -90,6 +91,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
micr_status |=
(MII_DP83867_MICR_AN_ERR_INT_EN |
MII_DP83867_MICR_SPEED_CHNG_INT_EN |
MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);

@@ -190,6 +193,13 @@ static int dp83867_config_init(struct phy_device *phydev)
DP83867_DEVADDR, delay);
}

/* Enable Interrupt output INT_OE in CFG3 register */
if (phy_interrupt_is_valid(phydev)) {
val = phy_read(phydev, DP83867_CFG3);
val |= BIT(7);
phy_write(phydev, DP83867_CFG3, val);
}

return 0;
}
@@ -674,6 +674,9 @@ void phy_stop_machine(struct phy_device *phydev)
if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);

/* Now we can run the state machine synchronously */
phy_state_machine(&phydev->state_queue.work);
}

/**
@@ -1060,6 +1063,15 @@ void phy_state_machine(struct work_struct *work)
if (old_link != phydev->link)
phydev->state = PHY_CHANGELINK;
}
/*
* Failsafe: check that nobody set phydev->link=0 between two
* poll cycles, otherwise we won't leave RUNNING state as long
* as link remains down.
*/
if (!phydev->link && phydev->state == PHY_RUNNING) {
phydev->state = PHY_CHANGELINK;
phydev_err(phydev, "no link in PHY_RUNNING\n");
}
break;
case PHY_CHANGELINK:
err = phy_read_status(phydev);
@@ -1714,6 +1714,8 @@ static int phy_remove(struct device *dev)
{
struct phy_device *phydev = to_phy_device(dev);

cancel_delayed_work_sync(&phydev->state_queue);

mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN;
mutex_unlock(&phydev->lock);
@@ -4161,11 +4161,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
goto fail;
}

/* allocate scatter-gather table. sg support
* will be disabled upon allocation failure.
*/
brcmf_sdiod_sgtable_alloc(bus->sdiodev);

/* Query the F2 block size, set roundup accordingly */
bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
bus->roundup = min(max_roundup, bus->blocksize);
@@ -1190,11 +1190,11 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
next_reclaimed;
IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
next_reclaimed);
iwlagn_check_ratid_empty(priv, sta_id, tid);
}

iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);

iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;

/* process frames */
@@ -199,6 +199,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
unsigned long remaining_credit;
struct timer_list credit_timeout;
u64 credit_window_start;
bool rate_limited;

/* Statistics */
struct xenvif_stats stats;
@@ -105,6 +105,10 @@ static int xenvif_poll(struct napi_struct *napi, int budget)

if (work_done < budget) {
napi_complete(napi);
/* If the queue is rate-limited, it shall be
* rescheduled in the timer callback.
*/
if (likely(!queue->rate_limited))
xenvif_napi_schedule_or_enable_events(queue);
}
@@ -179,6 +179,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

queue->remaining_credit = min(max_credit, max_burst);
queue->rate_limited = false;
}

void xenvif_tx_credit_callback(unsigned long data)
@@ -685,8 +686,10 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
msecs_to_jiffies(queue->credit_usec / 1000);

/* Timer could already be pending in rare cases. */
if (timer_pending(&queue->credit_timeout))
if (timer_pending(&queue->credit_timeout)) {
queue->rate_limited = true;
return true;
}

/* Passed the point where we can replenish credit? */
if (time_after_eq64(now, next_credit)) {
@@ -701,6 +704,7 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
mod_timer(&queue->credit_timeout,
next_credit);
queue->credit_window_start = next_credit;
queue->rate_limited = true;

return true;
}
@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
ssize_t rval = 0;

if (ha->optrom_state != QLA_SREADING)
return 0;

mutex_lock(&ha->optrom_mutex);

if (ha->optrom_state != QLA_SREADING)
goto out;

rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
ha->optrom_region_size);

out:
mutex_unlock(&ha->optrom_mutex);

return rval;
@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;

if (ha->optrom_state != QLA_SWRITING)
mutex_lock(&ha->optrom_mutex);

if (ha->optrom_state != QLA_SWRITING) {
mutex_unlock(&ha->optrom_mutex);
return -EINVAL;
if (off > ha->optrom_region_size)
}
if (off > ha->optrom_region_size) {
mutex_unlock(&ha->optrom_mutex);
return -ERANGE;
}
if (off + count > ha->optrom_region_size)
count = ha->optrom_region_size - off;

mutex_lock(&ha->optrom_mutex);
memcpy(&ha->optrom_buffer[off], buf, count);
mutex_unlock(&ha->optrom_mutex);
@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
SPI_ENGINE_VERSION_MAJOR(version),
SPI_ENGINE_VERSION_MINOR(version),
SPI_ENGINE_VERSION_PATCH(version));
return -ENODEV;
ret = -ENODEV;
goto err_put_master;
}

spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
@@ -490,14 +490,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn)

static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);

static bool iscsi_target_sk_state_check(struct sock *sk)
static bool __iscsi_target_sk_check_close(struct sock *sk)
{
if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
"returning FALSE\n");
return true;
}
return false;
}
return true;

static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
{
bool state = false;

if (conn->sock) {
struct sock *sk = conn->sock->sk;

read_lock_bh(&sk->sk_callback_lock);
state = (__iscsi_target_sk_check_close(sk) ||
test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
read_unlock_bh(&sk->sk_callback_lock);
}
return state;
}

static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
{
bool state = false;

if (conn->sock) {
struct sock *sk = conn->sock->sk;

read_lock_bh(&sk->sk_callback_lock);
state = test_bit(flag, &conn->login_flags);
read_unlock_bh(&sk->sk_callback_lock);
}
return state;
}

static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
{
bool state = false;

if (conn->sock) {
struct sock *sk = conn->sock->sk;

write_lock_bh(&sk->sk_callback_lock);
state = (__iscsi_target_sk_check_close(sk) ||
test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
if (!state)
clear_bit(flag, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);
}
return state;
}

static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
@@ -537,6 +583,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work)

pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
conn, current->comm, current->pid);
/*
* If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
* before initial PDU processing in iscsi_target_start_negotiation()
* has completed, go ahead and retry until it's cleared.
*
* Otherwise if the TCP connection drops while this is occuring,
* iscsi_target_start_negotiation() will detect the failure, call
* cancel_delayed_work_sync(&conn->login_work), and cleanup the
* remaining iscsi connection resources from iscsi_np process context.
*/
if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
return;
}

spin_lock(&tpg->tpg_state_lock);
state = (tpg->tpg_state == TPG_STATE_ACTIVE);
@@ -544,26 +604,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work)

if (!state) {
pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
iscsi_target_restore_sock_callbacks(conn);
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
return;
goto err;
}

if (conn->sock) {
struct sock *sk = conn->sock->sk;

read_lock_bh(&sk->sk_callback_lock);
state = iscsi_target_sk_state_check(sk);
read_unlock_bh(&sk->sk_callback_lock);

if (!state) {
if (iscsi_target_sk_check_close(conn)) {
pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
iscsi_target_restore_sock_callbacks(conn);
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
return;
}
goto err;
}

conn->login_kworker = current;
@@ -581,34 +627,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
flush_signals(current);
conn->login_kworker = NULL;

if (rc < 0) {
iscsi_target_restore_sock_callbacks(conn);
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
return;
}
if (rc < 0)
goto err;

pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
conn, current->comm, current->pid);

rc = iscsi_target_do_login(conn, login);
if (rc < 0) {
iscsi_target_restore_sock_callbacks(conn);
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
goto err;
} else if (!rc) {
if (conn->sock) {
struct sock *sk = conn->sock->sk;

write_lock_bh(&sk->sk_callback_lock);
clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);
}
if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
goto err;
} else if (rc == 1) {
iscsi_target_nego_release(conn);
iscsi_post_login_handler(np, conn, zero_tsih);
iscsit_deaccess_np(np, tpg, tpg_np);
}
return;

err:
iscsi_target_restore_sock_callbacks(conn);
iscsi_target_login_drop(conn, login);
iscsit_deaccess_np(np, tpg, tpg_np);
}

static void iscsi_target_do_cleanup(struct work_struct *work)
@@ -656,31 +697,54 @@ static void iscsi_target_sk_state_change(struct sock *sk)
orig_state_change(sk);
return;
}
state = __iscsi_target_sk_check_close(sk);
pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);

if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
" conn: %p\n", conn);
if (state)
set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk);
return;
}
if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
conn);
write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk);
return;
}

state = iscsi_target_sk_state_check(sk);
/*
* If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
* but only queue conn->login_work -> iscsi_target_do_login_rx()
* processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
*
* When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
* will detect the dropped TCP connection from delayed workqueue context.
*
* If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
* iscsi_target_start_negotiation() is running, iscsi_target_do_login()
* via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
* via iscsi_target_sk_check_and_clear() is responsible for detecting the
* dropped TCP connection in iscsi_np process context, and cleaning up
* the remaining iscsi connection resources.
*/
if (state) {
pr_debug("iscsi_target_sk_state_change got failed state\n");
set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);

pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
orig_state_change(sk);

if (!state) {
pr_debug("iscsi_target_sk_state_change got failed state\n");
schedule_delayed_work(&conn->login_cleanup_work, 0);
if (!state)
schedule_delayed_work(&conn->login_work, 0);
return;
}
write_unlock_bh(&sk->sk_callback_lock);

orig_state_change(sk);
}

@@ -945,6 +1009,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
if (iscsi_target_handle_csg_one(conn, login) < 0)
return -1;
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
/*
* Check to make sure the TCP connection has not
* dropped asynchronously while session reinstatement
* was occuring in this kthread context, before
* transitioning to full feature phase operation.
*/
if (iscsi_target_sk_check_close(conn))
return -1;

login->tsih = conn->sess->tsih;
login->login_complete = 1;
iscsi_target_restore_sock_callbacks(conn);
@@ -971,21 +1044,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo
break;
}

if (conn->sock) {
struct sock *sk = conn->sock->sk;
bool state;

read_lock_bh(&sk->sk_callback_lock);
state = iscsi_target_sk_state_check(sk);
read_unlock_bh(&sk->sk_callback_lock);

if (!state) {
pr_debug("iscsi_target_do_login() failed state for"
" conn: %p\n", conn);
return -1;
}
}

return 0;
}

@@ -1254,10 +1312,22 @@ int iscsi_target_start_negotiation(

write_lock_bh(&sk->sk_callback_lock);
set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);
}

/*
* If iscsi_target_do_login returns zero to signal more PDU
* exchanges are required to complete the login, go ahead and
* clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
* is still active.
*
* Otherwise if TCP connection dropped asynchronously, go ahead
* and perform connection cleanup now.
*/
ret = iscsi_target_do_login(conn, login);
if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
ret = -1;

if (ret < 0) {
cancel_delayed_work_sync(&conn->login_work);
cancel_delayed_work_sync(&conn->login_cleanup_work);
@@ -4759,10 +4759,6 @@ skip_async:
else
flush = BTRFS_RESERVE_NO_FLUSH;
spin_lock(&space_info->lock);
if (can_overcommit(root, space_info, orig, flush)) {
spin_unlock(&space_info->lock);
break;
}
if (list_empty(&space_info->tickets) &&
list_empty(&space_info->priority_tickets)) {
spin_unlock(&space_info->lock);
@@ -469,6 +469,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
lastoff = page_offset(page);
bh = head = page_buffers(page);
do {
if (lastoff + bh->b_size <= startoff)
goto next;
if (buffer_uptodate(bh) ||
buffer_unwritten(bh)) {
if (whence == SEEK_DATA)
@@ -483,6 +485,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
unlock_page(page);
goto out;
}
next:
lastoff += bh->b_size;
bh = bh->b_this_page;
} while (bh != head);
@@ -1926,7 +1926,8 @@ retry:
n_desc_blocks = o_desc_blocks +
le16_to_cpu(es->s_reserved_gdt_blocks);
n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
n_blocks_count = (ext4_fsblk_t)n_group *
EXT4_BLOCKS_PER_GROUP(sb);
n_group--; /* set to last group number */
}
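The cast matters because n_group and EXT4_BLOCKS_PER_GROUP() are 32-bit values, so their product is evaluated in 32 bits before being stored in the 64-bit n_blocks_count. A small userspace sketch of the arithmetic (illustrative values; 32768 blocks per group is the common 4K-block-size default):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t n_group = 200000;          /* illustrative group count on a large fs */
	uint32_t blocks_per_group = 32768;  /* typical value for 4 KiB blocks */

	uint64_t wrong = n_group * blocks_per_group;            /* 32-bit multiply, wraps */
	uint64_t right = (uint64_t)n_group * blocks_per_group;  /* widened first, as in the fix */

	printf("without cast: %llu\n", (unsigned long long)wrong);
	printf("with cast:    %llu\n", (unsigned long long)right);
	return 0;
}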
@@ -7410,7 +7410,7 @@ static void nfs4_exchange_id_done(struct rpc_task *task, void *data)
cdata->res.server_scope = NULL;
}
/* Save the EXCHANGE_ID verifier session trunk tests */
memcpy(clp->cl_confirm.data, cdata->args.verifier->data,
memcpy(clp->cl_confirm.data, cdata->args.verifier.data,
sizeof(clp->cl_confirm.data));
}
out:
@@ -7447,7 +7447,6 @@ static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
u32 sp4_how, struct rpc_xprt *xprt)
{
nfs4_verifier verifier;
struct rpc_message msg = {
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
.rpc_cred = cred,
@@ -7470,8 +7469,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
if (!calldata)
goto out;

if (!xprt)
nfs4_init_boot_verifier(clp, &verifier);
nfs4_init_boot_verifier(clp, &calldata->args.verifier);

status = nfs4_init_uniform_client_string(clp);
if (status)
@@ -7516,9 +7514,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
task_setup_data.rpc_xprt = xprt;
task_setup_data.flags =
RPC_TASK_SOFT|RPC_TASK_SOFTCONN|RPC_TASK_ASYNC;
calldata->args.verifier = &clp->cl_confirm;
} else {
calldata->args.verifier = &verifier;
memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
sizeof(calldata->args.verifier.data));
}
calldata->args.client = clp;
#ifdef CONFIG_NFS_V4_1_MIGRATION
@@ -1761,7 +1761,7 @@ static void encode_exchange_id(struct xdr_stream *xdr,
int len = 0;

encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr);
encode_nfs4_verifier(xdr, args->verifier);
encode_nfs4_verifier(xdr, &args->verifier);

encode_string(xdr, strlen(args->client->cl_owner_id),
args->client->cl_owner_id);
@@ -16,6 +16,19 @@

#ifdef CONFIG_CPUSETS

/*
* Static branch rewrites can happen in an arbitrary order for a given
* key. In code paths where we need to loop with read_mems_allowed_begin() and
* read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
* to ensure that begin() always gets rewritten before retry() in the
* disabled -> enabled transition. If not, then if local irqs are disabled
* around the loop, we can deadlock since retry() would always be
* comparing the latest value of the mems_allowed seqcount against 0 as
* begin() still would see cpusets_enabled() as false. The enabled -> disabled
* transition should happen in reverse order for the same reasons (want to stop
* looking at real value of mems_allowed.sequence in retry() first).
*/
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
@@ -30,12 +43,14 @@ static inline int nr_cpusets(void)

static inline void cpuset_inc(void)
{
static_branch_inc(&cpusets_pre_enable_key);
static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
static_branch_dec(&cpusets_enabled_key);
static_branch_dec(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
@@ -113,7 +128,7 @@ extern void cpuset_print_current_mems_allowed(void);
*/
static inline unsigned int read_mems_allowed_begin(void)
{
if (!cpusets_enabled())
if (!static_branch_unlikely(&cpusets_pre_enable_key))
return 0;

return read_seqcount_begin(&current->mems_allowed_seq);
@@ -127,7 +142,7 @@ static inline unsigned int read_mems_allowed_begin(void)
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
if (!cpusets_enabled())
if (!static_branch_unlikely(&cpusets_enabled_key))
return false;

return read_seqcount_retry(&current->mems_allowed_seq, seq);
@@ -514,6 +514,10 @@ struct mm_struct {
* PROT_NONE or PROT_NUMA mapped page.
*/
bool tlb_flush_pending;
#endif
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/* See flush_tlb_batched_pending() */
bool tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
#ifdef CONFIG_X86_INTEL_MPX
@@ -1199,7 +1199,7 @@ struct nfs41_state_protection {

struct nfs41_exchange_id_args {
struct nfs_client *client;
nfs4_verifier *verifier;
nfs4_verifier verifier;
u32 flags;
struct nfs41_state_protection state_protect;
};
@@ -33,6 +33,8 @@ enum dev_dma_attr {
DEV_DMA_COHERENT,
};

struct fwnode_handle *dev_fwnode(struct device *dev);

bool device_property_present(struct device *dev, const char *propname);
int device_property_read_u8_array(struct device *dev, const char *propname,
u8 *val, size_t nval);
@@ -842,6 +842,16 @@ struct signal_struct {

#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
unsigned int flags)
{
WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
@@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
@@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
* be allocated from the same page.
*/
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
#define KMALLOC_SHIFT_MAX 30
#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
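The "- 1" keeps the largest kmalloc size inside what the page allocator can actually hand out: the buddy allocator's biggest block is 2^(MAX_ORDER-1) pages. A quick check with typical x86-64 values (MAX_ORDER = 11, PAGE_SHIFT = 12; both are configuration-dependent assumptions):

#include <stdio.h>

#define MAX_ORDER  11   /* assumed default */
#define PAGE_SHIFT 12   /* 4 KiB pages, assumed */

int main(void)
{
	unsigned long max_alloc = 1UL << (MAX_ORDER - 1 + PAGE_SHIFT); /* largest buddy block */
	unsigned long old_max   = 1UL << (MAX_ORDER + PAGE_SHIFT);     /* old KMALLOC_MAX_SIZE */
	unsigned long new_max   = 1UL << (MAX_ORDER + PAGE_SHIFT - 1); /* fixed KMALLOC_MAX_SIZE */

	printf("page allocator limit: %lu KiB\n", max_alloc >> 10);
	printf("old KMALLOC_MAX_SIZE: %lu KiB (exceeds the limit)\n", old_max >> 10);
	printf("new KMALLOC_MAX_SIZE: %lu KiB (fits)\n", new_max >> 10);
	return 0;
}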
@@ -311,6 +311,7 @@ enum {

__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
__WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
__WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */

WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
@@ -409,7 +410,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue(fmt, flags, args...) \
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)

#define create_workqueue(name) \
alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
@@ -556,6 +556,7 @@ iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
memcpy(stream + lcp_len,
((char *) &iwe->u) + IW_EV_POINT_OFF,
IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
if (iwe->u.data.length && extra)
memcpy(stream + point_len, extra, iwe->u.data.length);
stream += event_len;
}
@@ -460,6 +460,8 @@ _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member)

#define _sctp_walk_params(pos, chunk, end, member)\
for (pos.v = chunk->member;\
(pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
(void *)chunk + end) &&\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
pos.v += SCTP_PAD4(ntohs(pos.p->length)))
@@ -470,6 +472,8 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length))
#define _sctp_walk_errors(err, chunk_hdr, end)\
for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(sctp_chunkhdr_t));\
((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
(void *)chunk_hdr + end) &&\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
err = (sctp_errhdr_t *)((void *)err + SCTP_PAD4(ntohs(err->length))))
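Both macros now verify that the length field itself lies inside the chunk before it is read, and only then use it to bound the rest of the entry. A minimal userspace sketch of that pattern for a generic type-length-value buffer (the struct layout and padding rule are illustrative, not the SCTP wire format):

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct tlv { unsigned char type; unsigned char flags; unsigned short length; };

/* Walk TLVs in buf[0..len): first check that the length field is readable,
 * then that the advertised length stays inside the buffer and is sane. */
static void walk(const unsigned char *buf, size_t len)
{
	size_t off = 0;

	while (off + offsetof(struct tlv, length) + sizeof(unsigned short) <= len) {
		struct tlv hdr;
		memcpy(&hdr, buf + off, sizeof(hdr));

		unsigned short l = ntohs(hdr.length);
		if (l < sizeof(struct tlv) || off + l > len)
			break;                      /* truncated or bogus entry */
		printf("type %u, length %u\n", hdr.type, l);
		off += (l + 3u) & ~3u;              /* 4-byte padding, like SCTP_PAD4 */
	}
}

int main(void)
{
	/* One valid 8-byte TLV followed by a truncated header. */
	unsigned char buf[] = { 1, 0, 0x00, 0x08, 0xde, 0xad, 0xbe, 0xef, 2, 0 };
	walk(buf, sizeof(buf));
	return 0;
}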
@@ -563,6 +563,7 @@ struct iscsi_conn {
#define LOGIN_FLAGS_READ_ACTIVE 1
#define LOGIN_FLAGS_CLOSED 2
#define LOGIN_FLAGS_READY 4
#define LOGIN_FLAGS_INITIAL_PDU 8
unsigned long login_flags;
struct delayed_work login_work;
struct delayed_work login_cleanup_work;
@@ -3488,11 +3488,11 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
cgrp->subtree_control &= ~disable;

ret = cgroup_apply_control(cgrp);

cgroup_finalize_control(cgrp, ret);
if (ret)
goto out_unlock;

kernfs_activate(cgrp->kn);
ret = 0;
out_unlock:
cgroup_kn_unlock(of->kn);
return ret ?: nbytes;
@@ -5724,6 +5724,10 @@ int __init cgroup_init(void)

if (ss->bind)
ss->bind(init_css_set.subsys[ssid]);

mutex_lock(&cgroup_mutex);
css_populate_dir(init_css_set.subsys[ssid]);
mutex_unlock(&cgroup_mutex);
}

/* init_css_set.subsys[] has been updated, re-hash */
@@ -61,6 +61,7 @@
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/* See "Frequency meter" comments, below. */
@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
* fresh group stop. Read comment in do_signal_stop() for details.
*/
if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
sig->flags = SIGNAL_STOP_STOPPED;
signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
return true;
}
return false;
@@ -845,7 +845,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
* will take ->siglock, notice SIGNAL_CLD_MASK, and
* notify its parent. See get_signal_to_deliver().
*/
signal->flags = why | SIGNAL_STOP_CONTINUED;
signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
signal->group_stop_count = 0;
signal->group_exit_code = 0;
}
@@ -1536,7 +1536,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
base->is_idle = false;
} else {
if (!is_max_delta)
expires = basem + (nextevt - basej) * TICK_NSEC;
expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
/*
* If we expect to sleep more than a tick, mark the base idle:
*/
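On 32-bit kernels nextevt - basej is an unsigned long, so without the cast the multiplication by TICK_NSEC is done in 32 bits and can wrap before it is added to the 64-bit basem. A small demonstration of the wrap with an assumed HZ=100 tick (TICK_NSEC = 10,000,000):

#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 10000000u   /* 10 ms tick, i.e. HZ=100 (assumption) */

int main(void)
{
	uint32_t delta = 500;     /* ticks until the next timer expires */
	uint64_t basem = 0;

	/* 32-bit product: 500 * 10,000,000 = 5,000,000,000 wraps past 2^32 */
	uint64_t wrong = basem + delta * TICK_NSEC;
	/* widen first, as get_next_timer_interrupt() now does */
	uint64_t right = basem + (uint64_t)delta * TICK_NSEC;

	printf("truncated: %llu ns\n", (unsigned long long)wrong);
	printf("correct:   %llu ns\n", (unsigned long long)right);
	return 0;
}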
@@ -3730,9 +3730,13 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
return -EINVAL;

/* creating multiple pwqs breaks ordering guarantee */
if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
if (!list_empty(&wq->pwqs)) {
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return -EINVAL;

wq->flags &= ~__WQ_ORDERED;
}

ctx = apply_wqattrs_prepare(wq, attrs);
if (!ctx)
return -ENOMEM;
@@ -3915,6 +3919,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
struct workqueue_struct *wq;
struct pool_workqueue *pwq;

/*
* Unbound && max_active == 1 used to imply ordered, which is no
* longer the case on NUMA machines due to per-node pools. While
* alloc_ordered_workqueue() is the right way to create an ordered
* workqueue, keep the previous behavior to avoid subtle breakages
* on NUMA.
*/
if ((flags & WQ_UNBOUND) && max_active == 1)
flags |= __WQ_ORDERED;

/* see the comment above the definition of WQ_POWER_EFFICIENT */
if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
flags |= WQ_UNBOUND;
@@ -4103,13 +4117,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
struct pool_workqueue *pwq;

/* disallow meddling with max_active for ordered workqueues */
if (WARN_ON(wq->flags & __WQ_ORDERED))
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return;

max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);

mutex_lock(&wq->mutex);

wq->flags &= ~__WQ_ORDERED;
wq->saved_max_active = max_active;

for_each_pwq(pwq, wq)
@@ -5214,7 +5229,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
* attributes breaks ordering guarantee. Disallow exposing ordered
* workqueues.
*/
if (WARN_ON(wq->flags & __WQ_ORDERED))
if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return -EINVAL;

wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
@@ -145,7 +145,7 @@ config DEBUG_INFO_REDUCED

config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
depends on DEBUG_INFO
depends on DEBUG_INFO && !FRV
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
@@ -472,6 +472,7 @@ struct tlbflush_unmap_batch;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
@@ -479,7 +480,9 @@ static inline void try_to_unmap_flush(void)
static inline void try_to_unmap_flush_dirty(void)
{
}

static inline void flush_tlb_batched_pending(struct mm_struct *mm)
{
}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */

extern const struct trace_print_flags pageflag_names[];
@@ -21,6 +21,7 @@
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include "internal.h"

#include <asm/tlb.h>

@@ -282,6 +283,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
return 0;

orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode();
for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
@@ -1124,6 +1124,7 @@ again:
init_rss_vec(rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte = start_pte;
flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = *pte;
@@ -74,6 +74,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (!pte)
return 0;

flush_tlb_batched_pending(vma->vm_mm);
arch_enter_lazy_mmu_mode();
do {
oldpte = *pte;
@@ -142,6 +142,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
new_ptl = pte_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
flush_tlb_batched_pending(vma->vm_mm);
arch_enter_lazy_mmu_mode();

for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
@@ -1875,14 +1875,14 @@ int move_freepages(struct zone *zone,
#endif

for (page = start_page; page <= end_page;) {
/* Make sure we are not inadvertently changing nodes */
VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

if (!pfn_valid_within(page_to_pfn(page))) {
page++;
continue;
}

/* Make sure we are not inadvertently changing nodes */
VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

if (!PageBuddy(page)) {
page++;
continue;
@@ -6445,8 +6445,8 @@ unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
}

if (pages && s)
pr_info("Freeing %s memory: %ldK (%p - %p)\n",
s, pages << (PAGE_SHIFT - 10), start, end);
pr_info("Freeing %s memory: %ldK\n",
s, pages << (PAGE_SHIFT - 10));

return pages;
}
mm/rmap.c
@@ -616,6 +616,13 @@ static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
tlb_ubc->flush_required = true;

/*
* Ensure compiler does not re-order the setting of tlb_flush_batched
* before the PTE is cleared.
*/
barrier();
mm->tlb_flush_batched = true;

/*
* If the PTE was dirty then it's best to assume it's writable. The
* caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
@@ -643,6 +650,35 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)

return should_defer;
}

/*
* Reclaim unmaps pages under the PTL but do not flush the TLB prior to
* releasing the PTL if TLB flushes are batched. It's possible for a parallel
* operation such as mprotect or munmap to race between reclaim unmapping
* the page and flushing the page. If this race occurs, it potentially allows
* access to data via a stale TLB entry. Tracking all mm's that have TLB
* batching in flight would be expensive during reclaim so instead track
* whether TLB batching occurred in the past and if so then do a flush here
* if required. This will cost one additional flush per reclaim cycle paid
* by the first operation at risk such as mprotect and mumap.
*
* This must be called under the PTL so that an access to tlb_flush_batched
* that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
* via the PTL.
*/
void flush_tlb_batched_pending(struct mm_struct *mm)
{
if (mm->tlb_flush_batched) {
flush_tlb_mm(mm);

/*
* Do not allow the compiler to re-order the clearing of
* tlb_flush_batched before the tlb is flushed.
*/
barrier();
mm->tlb_flush_batched = false;
}
}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
struct page *page, bool writable)
@@ -28,6 +28,7 @@ static int dev_ifname(struct net *net, struct ifreq __user *arg)

if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
ifr.ifr_name[IFNAMSIZ-1] = 0;

error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
if (error)
@@ -1965,7 +1965,8 @@ static int do_setlink(const struct sk_buff *skb,
struct sockaddr *sa;
int len;

len = sizeof(sa_family_t) + dev->addr_len;
len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
sizeof(*sa));
sa = kmalloc(len, GFP_KERNEL);
if (!sa) {
err = -ENOMEM;
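dev_set_mac_address() and some drivers treat the buffer as a full struct sockaddr, so the allocation must be at least that big even when dev->addr_len is shorter (6 bytes for Ethernet). A small sketch of the size calculation with userspace headers (the 6-byte address length is an assumption standing in for an Ethernet device, and max_t is re-created locally):

#include <stdio.h>
#include <sys/socket.h>

#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	size_t addr_len = 6;   /* e.g. an Ethernet device */
	size_t old_len = sizeof(sa_family_t) + addr_len;
	size_t new_len = sizeof(sa_family_t) + max_t(size_t, addr_len,
						     sizeof(struct sockaddr));

	printf("old allocation: %zu bytes\n", old_len);  /* smaller than struct sockaddr */
	printf("new allocation: %zu bytes\n", new_len);  /* safe to pass around as struct sockaddr */
	return 0;
}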
@@ -1471,9 +1471,12 @@ int dccp_feat_init(struct sock *sk)
* singleton values (which always leads to failure).
* These settings can still (later) be overridden via sockopts.
*/
if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
ccid_get_builtin_ccids(&rx.val, &rx.len))
if (ccid_get_builtin_ccids(&tx.val, &tx.len))
return -ENOBUFS;
if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
kfree(tx.val);
return -ENOBUFS;
}

if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
!dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
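The split avoids leaking the first CCID list when only the second allocation fails. The general shape of the fix, as a userspace sketch with plain malloc/free (get_list is a hypothetical stand-in, not the DCCP API):

#include <stdlib.h>

/* Hypothetical stand-in for ccid_get_builtin_ccids(): allocates *val on success. */
static int get_list(unsigned char **val, int *len)
{
	*len = 4;
	*val = malloc(*len);
	return *val ? 0 : -1;
}

static int feat_init(void)
{
	unsigned char *tx, *rx;
	int tx_len, rx_len;

	if (get_list(&tx, &tx_len))
		return -1;              /* nothing allocated yet */
	if (get_list(&rx, &rx_len)) {
		free(tx);               /* the fix: release the first list */
		return -1;
	}

	/* ... use tx/rx ... */
	free(tx);
	free(rx);
	return 0;
}

int main(void) { return feat_init(); }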
@@ -637,6 +637,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;

inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
reqsk_put(req);
return 0;

drop_and_free:
@@ -380,6 +380,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
goto drop_and_free;

inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
reqsk_put(req);
return 0;

drop_and_free:
@@ -1320,13 +1320,14 @@ static struct pernet_operations fib_net_ops = {

void __init ip_fib_init(void)
{
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
fib_trie_init();

register_pernet_subsys(&fib_net_ops);

register_netdevice_notifier(&fib_netdev_notifier);
register_inetaddr_notifier(&fib_inetaddr_notifier);

fib_trie_init();
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
}
@@ -958,7 +958,8 @@ static int __ip_append_data(struct sock *sk,
csummode = CHECKSUM_PARTIAL;

cork->length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
if ((((length + (skb ? skb->len : fragheaderlen)) > mtu) ||
(skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
(sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
/* ip_route_me_harder expects skb->dst to be set */
skb_dst_set_noref(nskb, skb_dst(oldskb));

nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);

skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
ip4_dst_hoplimit(skb_dst(nskb)));
@@ -334,6 +334,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
treq = tcp_rsk(req);
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
treq->txhash = net_tx_rndhash();
req->mss = mss;
ireq->ir_num = ntohs(th->dest);
ireq->ir_rmt_port = th->source;
@@ -980,7 +980,7 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_tcp_notsent_lowat,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
.proc_handler = proc_douintvec,
},
#ifdef CONFIG_IP_ROUTE_MULTIPATH
{
@@ -83,7 +83,8 @@ struct bbr {
cwnd_gain:10, /* current gain for setting cwnd */
full_bw_cnt:3, /* number of rounds without large bw gains */
cycle_idx:3, /* current index in pacing_gain cycle array */
unused_b:6;
has_seen_rtt:1, /* have we seen an RTT sample yet? */
unused_b:5;
u32 prior_cwnd; /* prior cwnd upon entering loss recovery */
u32 full_bw; /* recent bw, to estimate if pipe is full */
};
@@ -182,6 +183,35 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
return rate >> BW_SCALE;
}

/* Convert a BBR bw and gain factor to a pacing rate in bytes per second. */
static u32 bbr_bw_to_pacing_rate(struct sock *sk, u32 bw, int gain)
{
u64 rate = bw;

rate = bbr_rate_bytes_per_sec(sk, rate, gain);
rate = min_t(u64, rate, sk->sk_max_pacing_rate);
return rate;
}

/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
u64 bw;
u32 rtt_us;

if (tp->srtt_us) { /* any RTT sample yet? */
rtt_us = max(tp->srtt_us >> 3, 1U);
bbr->has_seen_rtt = 1;
} else { /* no RTT sample yet */
rtt_us = USEC_PER_MSEC; /* use nominal default RTT */
}
bw = (u64)tp->snd_cwnd * BW_UNIT;
do_div(bw, rtt_us);
sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
}

/* Pace using current bw estimate and a gain factor. In order to help drive the
* network toward lower queues while maintaining high utilization and low
* latency, the average pacing rate aims to be slightly (~1%) lower than the
@@ -191,12 +221,13 @@ static u64 bbr_rate_bytes_per_sec(struct sock *sk, u64 rate, int gain)
*/
static void bbr_set_pacing_rate(struct sock *sk, u32 bw, int gain)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
u64 rate = bw;
u32 rate = bbr_bw_to_pacing_rate(sk, bw, gain);

rate = bbr_rate_bytes_per_sec(sk, rate, gain);
rate = min_t(u64, rate, sk->sk_max_pacing_rate);
if (bbr->mode != BBR_STARTUP || rate > sk->sk_pacing_rate)
if (unlikely(!bbr->has_seen_rtt && tp->srtt_us))
bbr_init_pacing_rate_from_rtt(sk);
if (bbr_full_bw_reached(sk) || rate > sk->sk_pacing_rate)
sk->sk_pacing_rate = rate;
}

@@ -769,7 +800,6 @@ static void bbr_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
u64 bw;

bbr->prior_cwnd = 0;
bbr->tso_segs_goal = 0; /* default segs per skb until first ACK */
@@ -785,11 +815,8 @@ static void bbr_init(struct sock *sk)

minmax_reset(&bbr->bw, bbr->rtt_cnt, 0); /* init max bw to 0 */

/* Initialize pacing rate to: high_gain * init_cwnd / RTT. */
bw = (u64)tp->snd_cwnd * BW_UNIT;
do_div(bw, (tp->srtt_us >> 3) ? : USEC_PER_MSEC);
sk->sk_pacing_rate = 0; /* force an update of sk_pacing_rate */
bbr_set_pacing_rate(sk, bw, bbr_high_gain);
bbr->has_seen_rtt = 0;
bbr_init_pacing_rate_from_rtt(sk);

bbr->restore_cwnd = 0;
bbr->round_start = 0;
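bbr_init_pacing_rate_from_rtt() boils down to rate = high_gain * cwnd * MSS / RTT. A back-of-the-envelope check with assumed values (10-segment initial cwnd, 1448-byte MSS, 10 ms smoothed RTT; BBR's high gain is approximately 2885/1000):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cwnd = 10;          /* initial congestion window, segments (assumed) */
	uint64_t mss = 1448;         /* bytes per segment (assumed) */
	uint64_t rtt_us = 10000;     /* 10 ms smoothed RTT (assumed) */
	uint64_t gain_num = 2885, gain_den = 1000;  /* BBR high gain ~= 2.885 */

	/* bytes per second = gain * (cwnd * mss) / rtt */
	uint64_t rate = gain_num * cwnd * mss * 1000000ull / (gain_den * rtt_us);

	printf("initial pacing rate: %llu bytes/sec (~%.1f Mbit/s)\n",
	       (unsigned long long)rate, rate * 8.0 / 1e6);
	return 0;
}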
@@ -671,8 +671,6 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
*prevhdr = NEXTHDR_FRAGMENT;
tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
if (!tmp_hdr) {
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
err = -ENOMEM;
goto fail;
}
@@ -791,8 +789,6 @@ slow_path:
frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
hroom + troom, GFP_ATOMIC);
if (!frag) {
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_FRAGFAILS);
err = -ENOMEM;
goto fail;
}
@@ -1384,7 +1380,7 @@ emsgsize:
*/

cork->length += length;
if ((((length + fragheaderlen) > mtu) ||
if ((((length + (skb ? skb->len : headersize)) > mtu) ||
(skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)

skb_dst_set(nskb, dst);

nskb->mark = fl6.flowi6_mark;

skb_reserve(nskb, hh_len + dst->header_len);
ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
ip6_dst_hoplimit(dst));
@@ -78,7 +78,7 @@ EXPORT_SYMBOL(ipv6_select_ident);

int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
u16 offset = sizeof(struct ipv6hdr);
unsigned int offset = sizeof(struct ipv6hdr);
unsigned int packet_len = skb_tail_pointer(skb) -
skb_network_header(skb);
int found_rhdr = 0;
@@ -86,6 +86,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)

while (offset <= packet_len) {
struct ipv6_opt_hdr *exthdr;
unsigned int len;

switch (**nexthdr) {

@@ -111,7 +112,10 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)

exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
offset += ipv6_optlen(exthdr);
len = ipv6_optlen(exthdr);
if (len + offset >= IPV6_MAXPLEN)
return -EINVAL;
offset += len;
*nexthdr = &exthdr->nexthdr;
}
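The switch from u16 to unsigned int, together with the IPV6_MAXPLEN check, keeps offset from wrapping while extension headers are accumulated. The wrap is easy to reproduce in plain C (the 2048-byte option length is illustrative; ipv6_optlen() can return at most 2048 for a single header):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t off16 = 40;        /* sizeof(struct ipv6hdr) */
	unsigned int off = 40;
	unsigned int optlen = 2048; /* large-but-legal extension header length */

	for (int i = 0; i < 40; i++) {  /* enough headers to run past 65535 */
		off16 += optlen;
		off += optlen;
	}

	printf("u16 offset:          %u (wrapped)\n", off16);
	printf("unsigned int offset: %u\n", off);
	return 0;
}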
@@ -209,6 +209,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
treq->snt_synack.v64 = 0;
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
treq->txhash = net_tx_rndhash();

/*
* We need to lookup the dst_entry to get the correct window size.
@@ -1088,8 +1088,8 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,

nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
int maxlen = ovs_ct_attr_lens[type].maxlen;
int minlen = ovs_ct_attr_lens[type].minlen;
int maxlen;
int minlen;

if (type > OVS_CT_ATTR_MAX) {
OVS_NLERR(log,
@@ -1097,6 +1097,9 @@ static int parse_ct(const struct nlattr *attr, struct ovs_conntrack_info *info,
type, OVS_CT_ATTR_MAX);
return -EINVAL;
}

maxlen = ovs_ct_attr_lens[type].maxlen;
minlen = ovs_ct_attr_lens[type].minlen;
if (nla_len(a) < minlen || nla_len(a) > maxlen) {
OVS_NLERR(log,
"Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
@@ -4322,7 +4322,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
if (closing && (po->tp_version > TPACKET_V2)) {
if (pg_vec && (po->tp_version > TPACKET_V2)) {
/* Because we don't support block-based V3 on tx-ring */
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, rb_queue);