From 2eb742bfa50d3ec740225b6552f2c2cab193f75b Mon Sep 17 00:00:00 2001 From: Julian Sikorski Date: Sun, 21 Jul 2024 06:04:42 +0000 Subject: [PATCH] Update odroidxu4-current to 6.6.41 --- .../odroidxu4-6.6/patch-6.6.40-41.patch | 3883 +++++++++++++++++ 1 file changed, 3883 insertions(+) create mode 100644 patch/kernel/archive/odroidxu4-6.6/patch-6.6.40-41.patch diff --git a/patch/kernel/archive/odroidxu4-6.6/patch-6.6.40-41.patch b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.40-41.patch new file mode 100644 index 000000000000..e5016015521c --- /dev/null +++ b/patch/kernel/archive/odroidxu4-6.6/patch-6.6.40-41.patch @@ -0,0 +1,3883 @@ +diff --git a/Documentation/admin-guide/cifs/usage.rst b/Documentation/admin-guide/cifs/usage.rst +index 5f936b4b60188..20aba92dfc5f5 100644 +--- a/Documentation/admin-guide/cifs/usage.rst ++++ b/Documentation/admin-guide/cifs/usage.rst +@@ -722,40 +722,26 @@ Configuration pseudo-files: + ======================= ======================================================= + SecurityFlags Flags which control security negotiation and + also packet signing. Authentication (may/must) +- flags (e.g. for NTLM and/or NTLMv2) may be combined with ++ flags (e.g. for NTLMv2) may be combined with + the signing flags. Specifying two different password + hashing mechanisms (as "must use") on the other hand + does not make much sense. Default flags are:: + +- 0x07007 +- +- (NTLM, NTLMv2 and packet signing allowed). The maximum +- allowable flags if you want to allow mounts to servers +- using weaker password hashes is 0x37037 (lanman, +- plaintext, ntlm, ntlmv2, signing allowed). Some +- SecurityFlags require the corresponding menuconfig +- options to be enabled. Enabling plaintext +- authentication currently requires also enabling +- lanman authentication in the security flags +- because the cifs module only supports sending +- laintext passwords using the older lanman dialect +- form of the session setup SMB. (e.g. for authentication +- using plain text passwords, set the SecurityFlags +- to 0x30030):: ++ 0x00C5 ++ ++ (NTLMv2 and packet signing allowed). Some SecurityFlags ++ may require enabling a corresponding menuconfig option. + + may use packet signing 0x00001 + must use packet signing 0x01001 +- may use NTLM (most common password hash) 0x00002 +- must use NTLM 0x02002 + may use NTLMv2 0x00004 + must use NTLMv2 0x04004 +- may use Kerberos security 0x00008 +- must use Kerberos 0x08008 +- may use lanman (weak) password hash 0x00010 +- must use lanman password hash 0x10010 +- may use plaintext passwords 0x00020 +- must use plaintext passwords 0x20020 +- (reserved for future packet encryption) 0x00040 ++ may use Kerberos security (krb5) 0x00008 ++ must use Kerberos 0x08008 ++ may use NTLMSSP 0x00080 ++ must use NTLMSSP 0x80080 ++ seal (packet encryption) 0x00040 ++ must seal (not implemented yet) 0x40040 + + cifsFYI If set to non-zero value, additional debug information + will be logged to the system error log. 
This field
+diff --git a/Makefile b/Makefile
+index c844130774569..7271f0106fd81 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 40
++SUBLEVEL = 41
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
+index 8aa39db095d76..2c5155bd376ba 100644
+--- a/arch/arm/mach-davinci/pm.c
++++ b/arch/arm/mach-davinci/pm.c
+@@ -61,7 +61,7 @@ static void davinci_pm_suspend(void)
+
+ /* Configure sleep count in deep sleep register */
+ val = __raw_readl(pm_config.deepsleep_reg);
+- val &= ~DEEPSLEEP_SLEEPCOUNT_MASK,
++ val &= ~DEEPSLEEP_SLEEPCOUNT_MASK;
+ val |= pm_config.sleepcount;
+ __raw_writel(val, pm_config.deepsleep_reg);
+
+diff --git a/arch/arm64/boot/dts/qcom/sa8775p.dtsi b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+index d4ca92b98c7db..88ef3b5d374b3 100644
+--- a/arch/arm64/boot/dts/qcom/sa8775p.dtsi
++++ b/arch/arm64/boot/dts/qcom/sa8775p.dtsi
+@@ -2398,7 +2398,7 @@ arch_timer: timer {
+ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
+- <GIC_PPI 12 IRQ_TYPE_LEVEL_LOW>;
++ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ pcie0: pci@1c00000{
+diff --git a/arch/arm64/boot/dts/qcom/sc8180x.dtsi b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+index fbb9bf09078a0..dd207eb81360a 100644
+--- a/arch/arm64/boot/dts/qcom/sc8180x.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc8180x.dtsi
+@@ -2551,11 +2551,14 @@ usb_sec_dpphy: dp-phy@88ef200 {
+
+ system-cache-controller@9200000 {
+ compatible = "qcom,sc8180x-llcc";
+- reg = <0 0x09200000 0 0x50000>, <0 0x09280000 0 0x50000>,
+- <0 0x09300000 0 0x50000>, <0 0x09380000 0 0x50000>,
+- <0 0x09600000 0 0x50000>;
++ reg = <0 0x09200000 0 0x58000>, <0 0x09280000 0 0x58000>,
++ <0 0x09300000 0 0x58000>, <0 0x09380000 0 0x58000>,
++ <0 0x09400000 0 0x58000>, <0 0x09480000 0 0x58000>,
++ <0 0x09500000 0 0x58000>, <0 0x09580000 0 0x58000>,
++ <0 0x09600000 0 0x58000>;
+ reg-names = "llcc0_base", "llcc1_base", "llcc2_base",
+- "llcc3_base", "llcc_broadcast_base";
++ "llcc3_base", "llcc4_base", "llcc5_base",
++ "llcc6_base", "llcc7_base", "llcc_broadcast_base";
+ interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+index a993ad15ea9a6..5c2894fcfa4a0 100644
+--- a/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
++++ b/arch/arm64/boot/dts/qcom/sc8280xp-lenovo-thinkpad-x13s.dts
+@@ -619,15 +619,16 @@ &i2c4 {
+
+ status = "okay";
+
+- /* FIXME: verify */
+ touchscreen@10 {
+- compatible = "hid-over-i2c";
++ compatible = "elan,ekth5015m", "elan,ekth6915";
+ reg = <0x10>;
+
+- hid-descr-addr = <0x1>;
+ interrupts-extended = <&tlmm 175 IRQ_TYPE_LEVEL_LOW>;
+- vdd-supply = <&vreg_misc_3p3>;
+- vddl-supply = <&vreg_s10b>;
++ reset-gpios = <&tlmm 99 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
++ no-reset-on-power-off;
++
++ vcc33-supply = <&vreg_misc_3p3>;
++ vccio-supply = <&vreg_misc_3p3>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&ts0_default>;
+@@ -1451,8 +1452,8 @@ int-n-pins {
+ reset-n-pins {
+ pins = "gpio99";
+ function = "gpio";
+- output-high;
+- drive-strength = <16>;
++ drive-strength = <2>;
++ bias-disable;
+ };
+ };
+
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index dc17896a001a9..e7338ed540d8f 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -308,8 +308,8 @@ static inline void __load_psw(psw_t psw)
+ */
+ static __always_inline void __load_psw_mask(unsigned long mask)
+ {
++ psw_t psw __uninitialized;
+ unsigned long 
addr; +- psw_t psw; + + psw.mask = mask; + +diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c +index 6396d6b06a3a2..9355fbe5f51e9 100644 +--- a/arch/s390/mm/pgalloc.c ++++ b/arch/s390/mm/pgalloc.c +@@ -53,6 +53,8 @@ unsigned long *crst_table_alloc(struct mm_struct *mm) + + void crst_table_free(struct mm_struct *mm, unsigned long *table) + { ++ if (!table) ++ return; + pagetable_free(virt_to_ptdesc(table)); + } + +@@ -500,6 +502,8 @@ static unsigned long *base_crst_alloc(unsigned long val) + + static void base_crst_free(unsigned long *table) + { ++ if (!table) ++ return; + pagetable_free(virt_to_ptdesc(table)); + } + +diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S +index 5d96561c0d6ad..2192b6c33ea00 100644 +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -167,22 +167,9 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL) + jne swapgs_restore_regs_and_return_to_usermode + + /* +- * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot +- * restore RF properly. If the slowpath sets it for whatever reason, we +- * need to restore it correctly. +- * +- * SYSRET can restore TF, but unlike IRET, restoring TF results in a +- * trap from userspace immediately after SYSRET. This would cause an +- * infinite loop whenever #DB happens with register state that satisfies +- * the opportunistic SYSRET conditions. For example, single-stepping +- * this user code: +- * +- * movq $stuck_here, %rcx +- * pushfq +- * popq %r11 +- * stuck_here: +- * +- * would never get past 'stuck_here'. ++ * SYSRET cannot restore RF. It can restore TF, but unlike IRET, ++ * restoring TF results in a trap from userspace immediately after ++ * SYSRET. + */ + testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11 + jnz swapgs_restore_regs_and_return_to_usermode +@@ -1527,13 +1514,13 @@ SYM_CODE_END(asm_exc_nmi) + * This handles SYSCALL from 32-bit code. There is no way to program + * MSRs to fully disable 32-bit SYSCALL. + */ +-SYM_CODE_START(ignore_sysret) ++SYM_CODE_START(entry_SYSCALL32_ignore) + UNWIND_HINT_END_OF_STACK + ENDBR + mov $-ENOSYS, %eax + CLEAR_CPU_BUFFERS + sysretl +-SYM_CODE_END(ignore_sysret) ++SYM_CODE_END(entry_SYSCALL32_ignore) + #endif + + .pushsection .text, "ax" +diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S +index 4c1dfc51c56e4..ebfccadf918cb 100644 +--- a/arch/x86/entry/entry_64_compat.S ++++ b/arch/x86/entry/entry_64_compat.S +@@ -90,10 +90,6 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL) + + cld + +- IBRS_ENTER +- UNTRAIN_RET +- CLEAR_BRANCH_HISTORY +- + /* + * SYSENTER doesn't filter flags, so we need to clear NT and AC + * ourselves. To save a few cycles, we can check whether +@@ -117,6 +113,16 @@ SYM_INNER_LABEL(entry_SYSENTER_compat_after_hwframe, SYM_L_GLOBAL) + jnz .Lsysenter_fix_flags + .Lsysenter_flags_fixed: + ++ /* ++ * CPU bugs mitigations mechanisms can call other functions. They ++ * should be invoked after making sure TF is cleared because ++ * single-step is ignored only for instructions inside the ++ * entry_SYSENTER_compat function. 
++ */
+
+ movq %rsp, %rdi
+ call do_SYSENTER_32
+ /* XEN PV guests always use IRET path */
+diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
+index 6e19d0f226000..67ad64efa9263 100644
+--- a/arch/x86/include/asm/processor.h
++++ b/arch/x86/include/asm/processor.h
+@@ -399,7 +399,7 @@ static inline unsigned long cpu_kernelmode_gs_base(int cpu)
+ return (unsigned long)per_cpu(fixed_percpu_data.gs_base, cpu);
+ }
+
+-extern asmlinkage void ignore_sysret(void);
++extern asmlinkage void entry_SYSCALL32_ignore(void);
+
+ /* Save actual FS/GS selectors and bases to current->thread */
+ void current_save_fsgs(void);
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 340dd6cc11af4..74d566263467e 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -2134,7 +2134,7 @@ void syscall_init(void)
+ (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
+ wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
+ #else
+- wrmsrl_cstar((unsigned long)ignore_sysret);
++ wrmsrl_cstar((unsigned long)entry_SYSCALL32_ignore);
+ wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
+ wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
+ wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
+diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
+index bd6a7857ce058..831fa4a121598 100644
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -16,7 +16,6 @@
+ #include <linux/acpi.h>
+ #include <linux/dmi.h>
+ #include <linux/sched.h> /* need_resched() */
+-#include <linux/sort.h>
+ #include <linux/tick.h>
+ #include <linux/cpuidle.h>
+ #include <linux/cpu.h>
+@@ -386,25 +385,24 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
+ acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
+ }
+
+-static int acpi_cst_latency_cmp(const void *a, const void *b)
++static void acpi_cst_latency_sort(struct acpi_processor_cx *states, size_t length)
+ {
+- const struct acpi_processor_cx *x = a, *y = b;
++ int i, j, k;
+
+- if (!(x->valid && y->valid))
+- return 0;
+- if (x->latency > y->latency)
+- return 1;
+- if (x->latency < y->latency)
+- return -1;
+- return 0;
+-}
+-static void acpi_cst_latency_swap(void *a, void *b, int n)
+-{
+- struct acpi_processor_cx *x = a, *y = b;
++ for (i = 1; i < length; i++) {
++ if (!states[i].valid)
++ continue;
+
+- if (!(x->valid && y->valid))
+- return;
+- swap(x->latency, y->latency);
++ for (j = i - 1, k = i; j >= 0; j--) {
++ if (!states[j].valid)
++ continue;
++
++ if (states[j].latency > states[k].latency)
++ swap(states[j].latency, states[k].latency);
++
++ k = j;
++ }
++ }
+ }
+
+ static int acpi_processor_power_verify(struct acpi_processor *pr)
+@@ -449,10 +447,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
+
+ if (buggy_latency) {
+ pr_notice("FW issue: working around C-state latencies out of order\n");
+- sort(&pr->power.states[1], max_cstate,
+- sizeof(struct acpi_processor_cx),
+- acpi_cst_latency_cmp,
+- acpi_cst_latency_swap);
++ acpi_cst_latency_sort(&pr->power.states[1], max_cstate);
+ }
+
+ lapic_timer_propagate_broadcast(pr);
+diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
+index ee71376f174b7..3bc1d9243dbd0 100644
+--- a/drivers/char/hpet.c
++++ b/drivers/char/hpet.c
+@@ -289,8 +289,13 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos)
+ if (!devp->hd_ireqfreq)
+ return -EIO;
+
+- if (count < sizeof(unsigned long))
+- return -EINVAL;
++ if (in_compat_syscall()) {
++ if (count < sizeof(compat_ulong_t))
++ return 
-EINVAL; ++ } else { ++ if (count < sizeof(unsigned long)) ++ return -EINVAL; ++ } + + add_wait_queue(&devp->hd_waitqueue, &wait); + +@@ -314,9 +319,16 @@ hpet_read(struct file *file, char __user *buf, size_t count, loff_t * ppos) + schedule(); + } + +- retval = put_user(data, (unsigned long __user *)buf); +- if (!retval) +- retval = sizeof(unsigned long); ++ if (in_compat_syscall()) { ++ retval = put_user(data, (compat_ulong_t __user *)buf); ++ if (!retval) ++ retval = sizeof(compat_ulong_t); ++ } else { ++ retval = put_user(data, (unsigned long __user *)buf); ++ if (!retval) ++ retval = sizeof(unsigned long); ++ } ++ + out: + __set_current_state(TASK_RUNNING); + remove_wait_queue(&devp->hd_waitqueue, &wait); +@@ -671,12 +683,24 @@ struct compat_hpet_info { + unsigned short hi_timer; + }; + ++/* 32-bit types would lead to different command codes which should be ++ * translated into 64-bit ones before passed to hpet_ioctl_common ++ */ ++#define COMPAT_HPET_INFO _IOR('h', 0x03, struct compat_hpet_info) ++#define COMPAT_HPET_IRQFREQ _IOW('h', 0x6, compat_ulong_t) ++ + static long + hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + { + struct hpet_info info; + int err; + ++ if (cmd == COMPAT_HPET_INFO) ++ cmd = HPET_INFO; ++ ++ if (cmd == COMPAT_HPET_IRQFREQ) ++ cmd = HPET_IRQFREQ; ++ + mutex_lock(&hpet_mutex); + err = hpet_ioctl_common(file->private_data, cmd, arg, &info); + mutex_unlock(&hpet_mutex); +diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c +index 37f1cdf46d291..4ac3a35dcd983 100644 +--- a/drivers/cpufreq/acpi-cpufreq.c ++++ b/drivers/cpufreq/acpi-cpufreq.c +@@ -890,8 +890,10 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) + if (perf->states[0].core_frequency * 1000 != freq_table[0].frequency) + pr_warn(FW_WARN "P-state 0 is not max freq\n"); + +- if (acpi_cpufreq_driver.set_boost) ++ if (acpi_cpufreq_driver.set_boost) { + set_boost(policy, acpi_cpufreq_driver.boost_enabled); ++ policy->boost_enabled = acpi_cpufreq_driver.boost_enabled; ++ } + + return result; + +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 06e0294a17a8d..df445b44e9ec0 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -1422,7 +1422,8 @@ static int cpufreq_online(unsigned int cpu) + } + + /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */ +- policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy); ++ if (cpufreq_boost_enabled() && policy_has_boost_freq(policy)) ++ policy->boost_enabled = true; + + /* + * The initialization has succeeded and the policy is online. 
+diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c +index 79d4254d1f9bc..bd1651e709365 100644 +--- a/drivers/firmware/cirrus/cs_dsp.c ++++ b/drivers/firmware/cirrus/cs_dsp.c +@@ -1053,9 +1053,16 @@ struct cs_dsp_coeff_parsed_coeff { + int len; + }; + +-static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str) ++static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, unsigned int avail, ++ const u8 **str) + { +- int length; ++ int length, total_field_len; ++ ++ /* String fields are at least one __le32 */ ++ if (sizeof(__le32) > avail) { ++ *pos = NULL; ++ return 0; ++ } + + switch (bytes) { + case 1: +@@ -1068,10 +1075,16 @@ static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str) + return 0; + } + ++ total_field_len = ((length + bytes) + 3) & ~0x03; ++ if ((unsigned int)total_field_len > avail) { ++ *pos = NULL; ++ return 0; ++ } ++ + if (str) + *str = *pos + bytes; + +- *pos += ((length + bytes) + 3) & ~0x03; ++ *pos += total_field_len; + + return length; + } +@@ -1096,71 +1109,134 @@ static int cs_dsp_coeff_parse_int(int bytes, const u8 **pos) + return val; + } + +-static inline void cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, const u8 **data, +- struct cs_dsp_coeff_parsed_alg *blk) ++static int cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, ++ const struct wmfw_region *region, ++ struct cs_dsp_coeff_parsed_alg *blk) + { + const struct wmfw_adsp_alg_data *raw; ++ unsigned int data_len = le32_to_cpu(region->len); ++ unsigned int pos; ++ const u8 *tmp; ++ ++ raw = (const struct wmfw_adsp_alg_data *)region->data; + + switch (dsp->fw_ver) { + case 0: + case 1: +- raw = (const struct wmfw_adsp_alg_data *)*data; +- *data = raw->data; ++ if (sizeof(*raw) > data_len) ++ return -EOVERFLOW; + + blk->id = le32_to_cpu(raw->id); + blk->name = raw->name; +- blk->name_len = strlen(raw->name); ++ blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name)); + blk->ncoeff = le32_to_cpu(raw->ncoeff); ++ ++ pos = sizeof(*raw); + break; + default: +- blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), data); +- blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), data, ++ if (sizeof(raw->id) > data_len) ++ return -EOVERFLOW; ++ ++ tmp = region->data; ++ blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), &tmp); ++ pos = tmp - region->data; ++ ++ tmp = ®ion->data[pos]; ++ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, + &blk->name); +- cs_dsp_coeff_parse_string(sizeof(u16), data, NULL); +- blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), data); ++ if (!tmp) ++ return -EOVERFLOW; ++ ++ pos = tmp - region->data; ++ cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL); ++ if (!tmp) ++ return -EOVERFLOW; ++ ++ pos = tmp - region->data; ++ if (sizeof(raw->ncoeff) > (data_len - pos)) ++ return -EOVERFLOW; ++ ++ blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), &tmp); ++ pos += sizeof(raw->ncoeff); + break; + } + ++ if ((int)blk->ncoeff < 0) ++ return -EOVERFLOW; ++ + cs_dsp_dbg(dsp, "Algorithm ID: %#x\n", blk->id); + cs_dsp_dbg(dsp, "Algorithm name: %.*s\n", blk->name_len, blk->name); + cs_dsp_dbg(dsp, "# of coefficient descriptors: %#x\n", blk->ncoeff); ++ ++ return pos; + } + +-static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data, +- struct cs_dsp_coeff_parsed_coeff *blk) ++static int cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, ++ const struct wmfw_region *region, ++ unsigned int pos, ++ struct cs_dsp_coeff_parsed_coeff *blk) + { + const struct 
wmfw_adsp_coeff_data *raw; ++ unsigned int data_len = le32_to_cpu(region->len); ++ unsigned int blk_len, blk_end_pos; + const u8 *tmp; +- int length; ++ ++ raw = (const struct wmfw_adsp_coeff_data *)®ion->data[pos]; ++ if (sizeof(raw->hdr) > (data_len - pos)) ++ return -EOVERFLOW; ++ ++ blk_len = le32_to_cpu(raw->hdr.size); ++ if (blk_len > S32_MAX) ++ return -EOVERFLOW; ++ ++ if (blk_len > (data_len - pos - sizeof(raw->hdr))) ++ return -EOVERFLOW; ++ ++ blk_end_pos = pos + sizeof(raw->hdr) + blk_len; ++ ++ blk->offset = le16_to_cpu(raw->hdr.offset); ++ blk->mem_type = le16_to_cpu(raw->hdr.type); + + switch (dsp->fw_ver) { + case 0: + case 1: +- raw = (const struct wmfw_adsp_coeff_data *)*data; +- *data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size); ++ if (sizeof(*raw) > (data_len - pos)) ++ return -EOVERFLOW; + +- blk->offset = le16_to_cpu(raw->hdr.offset); +- blk->mem_type = le16_to_cpu(raw->hdr.type); + blk->name = raw->name; +- blk->name_len = strlen(raw->name); ++ blk->name_len = strnlen(raw->name, ARRAY_SIZE(raw->name)); + blk->ctl_type = le16_to_cpu(raw->ctl_type); + blk->flags = le16_to_cpu(raw->flags); + blk->len = le32_to_cpu(raw->len); + break; + default: +- tmp = *data; +- blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp); +- blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp); +- length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp); +- blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, ++ pos += sizeof(raw->hdr); ++ tmp = ®ion->data[pos]; ++ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, + &blk->name); +- cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL); +- cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL); ++ if (!tmp) ++ return -EOVERFLOW; ++ ++ pos = tmp - region->data; ++ cs_dsp_coeff_parse_string(sizeof(u8), &tmp, data_len - pos, NULL); ++ if (!tmp) ++ return -EOVERFLOW; ++ ++ pos = tmp - region->data; ++ cs_dsp_coeff_parse_string(sizeof(u16), &tmp, data_len - pos, NULL); ++ if (!tmp) ++ return -EOVERFLOW; ++ ++ pos = tmp - region->data; ++ if (sizeof(raw->ctl_type) + sizeof(raw->flags) + sizeof(raw->len) > ++ (data_len - pos)) ++ return -EOVERFLOW; ++ + blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp); ++ pos += sizeof(raw->ctl_type); + blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp); ++ pos += sizeof(raw->flags); + blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp); +- +- *data = *data + sizeof(raw->hdr) + length; + break; + } + +@@ -1170,6 +1246,8 @@ static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data, + cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags); + cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type); + cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len); ++ ++ return blk_end_pos; + } + + static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp, +@@ -1193,12 +1271,16 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp, + struct cs_dsp_alg_region alg_region = {}; + struct cs_dsp_coeff_parsed_alg alg_blk; + struct cs_dsp_coeff_parsed_coeff coeff_blk; +- const u8 *data = region->data; +- int i, ret; ++ int i, pos, ret; ++ ++ pos = cs_dsp_coeff_parse_alg(dsp, region, &alg_blk); ++ if (pos < 0) ++ return pos; + +- cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk); + for (i = 0; i < alg_blk.ncoeff; i++) { +- cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk); ++ pos = cs_dsp_coeff_parse_coeff(dsp, region, pos, &coeff_blk); ++ if (pos < 0) ++ return pos; + + switch (coeff_blk.ctl_type) { + case 
WMFW_CTL_TYPE_BYTES: +@@ -1267,6 +1349,10 @@ static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp, + const struct wmfw_adsp1_sizes *adsp1_sizes; + + adsp1_sizes = (void *)&firmware->data[pos]; ++ if (sizeof(*adsp1_sizes) > firmware->size - pos) { ++ cs_dsp_err(dsp, "%s: file truncated\n", file); ++ return 0; ++ } + + cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file, + le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm), +@@ -1283,6 +1369,10 @@ static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp, + const struct wmfw_adsp2_sizes *adsp2_sizes; + + adsp2_sizes = (void *)&firmware->data[pos]; ++ if (sizeof(*adsp2_sizes) > firmware->size - pos) { ++ cs_dsp_err(dsp, "%s: file truncated\n", file); ++ return 0; ++ } + + cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file, + le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym), +@@ -1322,7 +1412,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, + struct regmap *regmap = dsp->regmap; + unsigned int pos = 0; + const struct wmfw_header *header; +- const struct wmfw_adsp1_sizes *adsp1_sizes; + const struct wmfw_footer *footer; + const struct wmfw_region *region; + const struct cs_dsp_region *mem; +@@ -1338,10 +1427,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, + + ret = -EINVAL; + +- pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer); +- if (pos >= firmware->size) { +- cs_dsp_err(dsp, "%s: file too short, %zu bytes\n", +- file, firmware->size); ++ if (sizeof(*header) >= firmware->size) { ++ ret = -EOVERFLOW; + goto out_fw; + } + +@@ -1369,22 +1456,36 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, + + pos = sizeof(*header); + pos = dsp->ops->parse_sizes(dsp, file, pos, firmware); ++ if ((pos == 0) || (sizeof(*footer) > firmware->size - pos)) { ++ ret = -EOVERFLOW; ++ goto out_fw; ++ } + + footer = (void *)&firmware->data[pos]; + pos += sizeof(*footer); + + if (le32_to_cpu(header->len) != pos) { +- cs_dsp_err(dsp, "%s: unexpected header length %d\n", +- file, le32_to_cpu(header->len)); ++ ret = -EOVERFLOW; + goto out_fw; + } + + cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file, + le64_to_cpu(footer->timestamp)); + +- while (pos < firmware->size && +- sizeof(*region) < firmware->size - pos) { ++ while (pos < firmware->size) { ++ /* Is there enough data for a complete block header? 
*/ ++ if (sizeof(*region) > firmware->size - pos) { ++ ret = -EOVERFLOW; ++ goto out_fw; ++ } ++ + region = (void *)&(firmware->data[pos]); ++ ++ if (le32_to_cpu(region->len) > firmware->size - pos - sizeof(*region)) { ++ ret = -EOVERFLOW; ++ goto out_fw; ++ } ++ + region_name = "Unknown"; + reg = 0; + text = NULL; +@@ -1441,16 +1542,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, + regions, le32_to_cpu(region->len), offset, + region_name); + +- if (le32_to_cpu(region->len) > +- firmware->size - pos - sizeof(*region)) { +- cs_dsp_err(dsp, +- "%s.%d: %s region len %d bytes exceeds file length %zu\n", +- file, regions, region_name, +- le32_to_cpu(region->len), firmware->size); +- ret = -EINVAL; +- goto out_fw; +- } +- + if (text) { + memcpy(text, region->data, le32_to_cpu(region->len)); + cs_dsp_info(dsp, "%s: %s\n", file, text); +@@ -1501,6 +1592,9 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware, + cs_dsp_buf_free(&buf_list); + kfree(text); + ++ if (ret == -EOVERFLOW) ++ cs_dsp_err(dsp, "%s: file content overflows file data\n", file); ++ + return ret; + } + +@@ -2068,10 +2162,20 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware + pos = le32_to_cpu(hdr->len); + + blocks = 0; +- while (pos < firmware->size && +- sizeof(*blk) < firmware->size - pos) { ++ while (pos < firmware->size) { ++ /* Is there enough data for a complete block header? */ ++ if (sizeof(*blk) > firmware->size - pos) { ++ ret = -EOVERFLOW; ++ goto out_fw; ++ } ++ + blk = (void *)(&firmware->data[pos]); + ++ if (le32_to_cpu(blk->len) > firmware->size - pos - sizeof(*blk)) { ++ ret = -EOVERFLOW; ++ goto out_fw; ++ } ++ + type = le16_to_cpu(blk->type); + offset = le16_to_cpu(blk->offset); + version = le32_to_cpu(blk->ver) >> 8; +@@ -2168,17 +2272,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware + } + + if (reg) { +- if (le32_to_cpu(blk->len) > +- firmware->size - pos - sizeof(*blk)) { +- cs_dsp_err(dsp, +- "%s.%d: %s region len %d bytes exceeds file length %zu\n", +- file, blocks, region_name, +- le32_to_cpu(blk->len), +- firmware->size); +- ret = -EINVAL; +- goto out_fw; +- } +- + buf = cs_dsp_buf_alloc(blk->data, + le32_to_cpu(blk->len), + &buf_list); +@@ -2218,6 +2311,10 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware + regmap_async_complete(regmap); + cs_dsp_buf_free(&buf_list); + kfree(text); ++ ++ if (ret == -EOVERFLOW) ++ cs_dsp_err(dsp, "%s: file content overflows file data\n", file); ++ + return ret; + } + +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c +index a32a93f9a60d0..84fdd3f5cc844 100644 +--- a/drivers/i2c/busses/i2c-rcar.c ++++ b/drivers/i2c/busses/i2c-rcar.c +@@ -114,6 +114,7 @@ enum rcar_i2c_type { + I2C_RCAR_GEN1, + I2C_RCAR_GEN2, + I2C_RCAR_GEN3, ++ I2C_RCAR_GEN4, + }; + + struct rcar_i2c_priv { +@@ -223,6 +224,14 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv) + + } + ++static void rcar_i2c_reset_slave(struct rcar_i2c_priv *priv) ++{ ++ rcar_i2c_write(priv, ICSIER, 0); ++ rcar_i2c_write(priv, ICSSR, 0); ++ rcar_i2c_write(priv, ICSCR, SDBS); ++ rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ ++} ++ + static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv) + { + int ret; +@@ -386,8 +395,8 @@ static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate) + dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), + sg_dma_len(&priv->sg), 
priv->dma_direction); + +- /* Gen3 can only do one RXDMA per transfer and we just completed it */ +- if (priv->devtype == I2C_RCAR_GEN3 && ++ /* Gen3+ can only do one RXDMA per transfer and we just completed it */ ++ if (priv->devtype >= I2C_RCAR_GEN3 && + priv->dma_direction == DMA_FROM_DEVICE) + priv->flags |= ID_P_NO_RXDMA; + +@@ -815,6 +824,10 @@ static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv) + { + int ret; + ++ /* Don't reset if a slave instance is currently running */ ++ if (priv->slave) ++ return -EISCONN; ++ + ret = reset_control_reset(priv->rstc); + if (ret) + return ret; +@@ -841,14 +854,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, + if (ret < 0) + goto out; + +- /* Gen3 needs a reset before allowing RXDMA once */ +- if (priv->devtype == I2C_RCAR_GEN3) { +- priv->flags |= ID_P_NO_RXDMA; +- if (!IS_ERR(priv->rstc)) { +- ret = rcar_i2c_do_reset(priv); +- if (ret == 0) +- priv->flags &= ~ID_P_NO_RXDMA; +- } ++ /* Gen3+ needs a reset. That also allows RXDMA once */ ++ if (priv->devtype >= I2C_RCAR_GEN3) { ++ ret = rcar_i2c_do_reset(priv); ++ if (ret) ++ goto out; ++ priv->flags &= ~ID_P_NO_RXDMA; + } + + rcar_i2c_init(priv); +@@ -975,11 +986,8 @@ static int rcar_unreg_slave(struct i2c_client *slave) + + /* ensure no irq is running before clearing ptr */ + disable_irq(priv->irq); +- rcar_i2c_write(priv, ICSIER, 0); +- rcar_i2c_write(priv, ICSSR, 0); ++ rcar_i2c_reset_slave(priv); + enable_irq(priv->irq); +- rcar_i2c_write(priv, ICSCR, SDBS); +- rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ + + priv->slave = NULL; + +@@ -1032,7 +1040,7 @@ static const struct of_device_id rcar_i2c_dt_ids[] = { + { .compatible = "renesas,rcar-gen1-i2c", .data = (void *)I2C_RCAR_GEN1 }, + { .compatible = "renesas,rcar-gen2-i2c", .data = (void *)I2C_RCAR_GEN2 }, + { .compatible = "renesas,rcar-gen3-i2c", .data = (void *)I2C_RCAR_GEN3 }, +- { .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN3 }, ++ { .compatible = "renesas,rcar-gen4-i2c", .data = (void *)I2C_RCAR_GEN4 }, + {}, + }; + MODULE_DEVICE_TABLE(of, rcar_i2c_dt_ids); +@@ -1092,22 +1100,15 @@ static int rcar_i2c_probe(struct platform_device *pdev) + goto out_pm_disable; + } + +- rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ ++ /* Bring hardware to known state */ ++ rcar_i2c_init(priv); ++ rcar_i2c_reset_slave(priv); + + if (priv->devtype < I2C_RCAR_GEN3) { + irqflags |= IRQF_NO_THREAD; + irqhandler = rcar_i2c_gen2_irq; + } + +- if (priv->devtype == I2C_RCAR_GEN3) { +- priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); +- if (!IS_ERR(priv->rstc)) { +- ret = reset_control_status(priv->rstc); +- if (ret < 0) +- priv->rstc = ERR_PTR(-ENOTSUPP); +- } +- } +- + /* Stay always active when multi-master to keep arbitration working */ + if (of_property_read_bool(dev->of_node, "multi-master")) + priv->flags |= ID_P_PM_BLOCKED; +@@ -1117,6 +1118,22 @@ static int rcar_i2c_probe(struct platform_device *pdev) + if (of_property_read_bool(dev->of_node, "smbus")) + priv->flags |= ID_P_HOST_NOTIFY; + ++ /* R-Car Gen3+ needs a reset before every transfer */ ++ if (priv->devtype >= I2C_RCAR_GEN3) { ++ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL); ++ if (IS_ERR(priv->rstc)) { ++ ret = PTR_ERR(priv->rstc); ++ goto out_pm_put; ++ } ++ ++ ret = reset_control_status(priv->rstc); ++ if (ret < 0) ++ goto out_pm_put; ++ ++ /* hard reset disturbs HostNotify local target, so disable it */ ++ priv->flags &= ~ID_P_HOST_NOTIFY; ++ } ++ + ret = 
platform_get_irq(pdev, 0); + if (ret < 0) + goto out_pm_put; +diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c +index 3642d42463209..1e873ff0a624d 100644 +--- a/drivers/i2c/i2c-core-base.c ++++ b/drivers/i2c/i2c-core-base.c +@@ -1064,6 +1064,7 @@ EXPORT_SYMBOL(i2c_find_device_by_fwnode); + + static const struct i2c_device_id dummy_id[] = { + { "dummy", 0 }, ++ { "smbus_host_notify", 0 }, + { }, + }; + +diff --git a/drivers/i2c/i2c-slave-testunit.c b/drivers/i2c/i2c-slave-testunit.c +index ca43e98cae1b2..23a11e4e92567 100644 +--- a/drivers/i2c/i2c-slave-testunit.c ++++ b/drivers/i2c/i2c-slave-testunit.c +@@ -118,6 +118,13 @@ static int i2c_slave_testunit_slave_cb(struct i2c_client *client, + queue_delayed_work(system_long_wq, &tu->worker, + msecs_to_jiffies(10 * tu->regs[TU_REG_DELAY])); + } ++ ++ /* ++ * Reset reg_idx to avoid that work gets queued again in case of ++ * STOP after a following read message. But do not clear TU regs ++ * here because we still need them in the workqueue! ++ */ ++ tu->reg_idx = 0; + break; + + case I2C_SLAVE_WRITE_REQUESTED: +diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c +index 18f83158f637f..b5fed8a000ea9 100644 +--- a/drivers/iio/industrialio-trigger.c ++++ b/drivers/iio/industrialio-trigger.c +@@ -322,7 +322,7 @@ int iio_trigger_attach_poll_func(struct iio_trigger *trig, + * this is the case if the IIO device and the trigger device share the + * same parent device. + */ +- if (iio_validate_own_trigger(pf->indio_dev, trig)) ++ if (!iio_validate_own_trigger(pf->indio_dev, trig)) + trig->attached_own_device = true; + + return ret; +diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c +index dbd26c3b245bc..bbcbc921f3ab8 100644 +--- a/drivers/misc/fastrpc.c ++++ b/drivers/misc/fastrpc.c +@@ -1238,6 +1238,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, + struct fastrpc_phy_page pages[1]; + char *name; + int err; ++ bool scm_done = false; + struct { + int pgid; + u32 namelen; +@@ -1289,6 +1290,7 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, + fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err); + goto err_map; + } ++ scm_done = true; + } + } + +@@ -1320,10 +1322,11 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl, + goto err_invoke; + + kfree(args); ++ kfree(name); + + return 0; + err_invoke: +- if (fl->cctx->vmcount) { ++ if (fl->cctx->vmcount && scm_done) { + u64 src_perms = 0; + struct qcom_scm_vmperm dst_perms; + u32 i; +@@ -1693,16 +1696,20 @@ static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr + { + struct fastrpc_invoke_args args[2] = { 0 }; + +- /* Capability filled in userspace */ ++ /* ++ * Capability filled in userspace. This carries the information ++ * about the remoteproc support which is fetched from the remoteproc ++ * sysfs node by userspace. 
++ */ + dsp_attr_buf[0] = 0; ++ dsp_attr_buf_len -= 1; + + args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len; + args[0].length = sizeof(dsp_attr_buf_len); + args[0].fd = -1; + args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1]; +- args[1].length = dsp_attr_buf_len; ++ args[1].length = dsp_attr_buf_len * sizeof(u32); + args[1].fd = -1; +- fl->pd = USER_PD; + + return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE, + FASTRPC_SCALARS(0, 1, 1), args); +@@ -1730,7 +1737,7 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap, + if (!dsp_attributes) + return -ENOMEM; + +- err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN); ++ err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES); + if (err == DSP_UNSUPPORTED_API) { + dev_info(&cctx->rpdev->dev, + "Warning: DSP capabilities not supported on domain: %d\n", domain); +@@ -1783,7 +1790,7 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp) + if (err) + return err; + +- if (copy_to_user(argp, &cap.capability, sizeof(cap.capability))) ++ if (copy_to_user(argp, &cap, sizeof(cap))) + return -EFAULT; + + return 0; +@@ -2080,6 +2087,16 @@ static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp) + return err; + } + ++static int is_attach_rejected(struct fastrpc_user *fl) ++{ ++ /* Check if the device node is non-secure */ ++ if (!fl->is_secure_dev) { ++ dev_dbg(&fl->cctx->rpdev->dev, "untrusted app trying to attach to privileged DSP PD\n"); ++ return -EACCES; ++ } ++ return 0; ++} ++ + static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) + { +@@ -2092,13 +2109,19 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, + err = fastrpc_invoke(fl, argp); + break; + case FASTRPC_IOCTL_INIT_ATTACH: +- err = fastrpc_init_attach(fl, ROOT_PD); ++ err = is_attach_rejected(fl); ++ if (!err) ++ err = fastrpc_init_attach(fl, ROOT_PD); + break; + case FASTRPC_IOCTL_INIT_ATTACH_SNS: +- err = fastrpc_init_attach(fl, SENSORS_PD); ++ err = is_attach_rejected(fl); ++ if (!err) ++ err = fastrpc_init_attach(fl, SENSORS_PD); + break; + case FASTRPC_IOCTL_INIT_CREATE_STATIC: +- err = fastrpc_init_create_static_process(fl, argp); ++ err = is_attach_rejected(fl); ++ if (!err) ++ err = fastrpc_init_create_static_process(fl, argp); + break; + case FASTRPC_IOCTL_INIT_CREATE: + err = fastrpc_init_create_process(fl, argp); +diff --git a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c +index 16695cb5e69c7..7c3d8bedf90ba 100644 +--- a/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c ++++ b/drivers/misc/mchp_pci1xxxx/mchp_pci1xxxx_otpe2p.c +@@ -153,7 +153,6 @@ static int pci1xxxx_eeprom_read(void *priv_t, unsigned int off, + + buf[byte] = readl(rb + MMAP_EEPROM_OFFSET(EEPROM_DATA_REG)); + } +- ret = byte; + error: + release_sys_lock(priv); + return ret; +@@ -197,7 +196,6 @@ static int pci1xxxx_eeprom_write(void *priv_t, unsigned int off, + goto error; + } + } +- ret = byte; + error: + release_sys_lock(priv); + return ret; +@@ -258,7 +256,6 @@ static int pci1xxxx_otp_read(void *priv_t, unsigned int off, + + buf[byte] = readl(rb + MMAP_OTP_OFFSET(OTP_RD_DATA_OFFSET)); + } +- ret = byte; + error: + release_sys_lock(priv); + return ret; +@@ -315,7 +312,6 @@ static int pci1xxxx_otp_write(void *priv_t, unsigned int off, + goto error; + } + } +- ret = byte; + error: + release_sys_lock(priv); + return ret; +diff --git a/drivers/net/dsa/lan9303-core.c 
b/drivers/net/dsa/lan9303-core.c +index ee67adeb2cdbf..24ef219e6f565 100644 +--- a/drivers/net/dsa/lan9303-core.c ++++ b/drivers/net/dsa/lan9303-core.c +@@ -1048,31 +1048,31 @@ static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset) + return ARRAY_SIZE(lan9303_mib); + } + +-static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum) ++static int lan9303_phy_read(struct dsa_switch *ds, int port, int regnum) + { + struct lan9303 *chip = ds->priv; + int phy_base = chip->phy_addr_base; + +- if (phy == phy_base) ++ if (port == 0) + return lan9303_virt_phy_reg_read(chip, regnum); +- if (phy > phy_base + 2) ++ if (port > 2) + return -ENODEV; + +- return chip->ops->phy_read(chip, phy, regnum); ++ return chip->ops->phy_read(chip, phy_base + port, regnum); + } + +-static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum, ++static int lan9303_phy_write(struct dsa_switch *ds, int port, int regnum, + u16 val) + { + struct lan9303 *chip = ds->priv; + int phy_base = chip->phy_addr_base; + +- if (phy == phy_base) ++ if (port == 0) + return lan9303_virt_phy_reg_write(chip, regnum, val); +- if (phy > phy_base + 2) ++ if (port > 2) + return -ENODEV; + +- return chip->ops->phy_write(chip, phy, regnum, val); ++ return chip->ops->phy_write(chip, phy_base + port, regnum, val); + } + + static int lan9303_port_enable(struct dsa_switch *ds, int port, +@@ -1100,7 +1100,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port) + vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port); + + lan9303_disable_processing_port(chip, port); +- lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN); ++ lan9303_phy_write(ds, port, MII_BMCR, BMCR_PDOWN); + } + + static int lan9303_port_bridge_join(struct dsa_switch *ds, int port, +@@ -1355,8 +1355,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = { + + static int lan9303_register_switch(struct lan9303 *chip) + { +- int base; +- + chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL); + if (!chip->ds) + return -ENOMEM; +@@ -1365,8 +1363,7 @@ static int lan9303_register_switch(struct lan9303 *chip) + chip->ds->num_ports = LAN9303_NUM_PORTS; + chip->ds->priv = chip; + chip->ds->ops = &lan9303_switch_ops; +- base = chip->phy_addr_base; +- chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base); ++ chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1, 0); + + return dsa_register_switch(chip->ds); + } +diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c +index 4b6bf2764bef7..d9e9ec2e8945d 100644 +--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c ++++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c +@@ -1306,6 +1306,7 @@ static int bcmasp_probe(struct platform_device *pdev) + dev_err(dev, "Cannot create eth interface %d\n", i); + bcmasp_remove_intfs(priv); + of_node_put(intf_node); ++ ret = -ENOMEM; + goto of_put_exit; + } + list_add_tail(&intf->list, &priv->intfs); +diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h +index 80125bea80a2a..290c23cec2fca 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h ++++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h +@@ -116,10 +116,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) + -EFBIG, /* I40E_AQ_RC_EFBIG */ + }; + +- /* aq_rc is invalid if AQ timed out */ +- if (aq_ret == -EIO) +- return -EAGAIN; +- + if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) + return -ERANGE; + 
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c +index f8d1a994c2f65..1d241ebd04ec7 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -13372,6 +13372,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, + bool need_reset; + int i; + ++ /* VSI shall be deleted in a moment, block loading new programs */ ++ if (prog && test_bit(__I40E_IN_REMOVE, pf->state)) ++ return -EINVAL; ++ + /* Don't allow frames that span over multiple buffers */ + if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) { + NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags"); +@@ -13380,14 +13384,9 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, + + /* When turning XDP on->off/off->on we reset and rebuild the rings. */ + need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); +- + if (need_reset) + i40e_prep_for_reset(pf); + +- /* VSI shall be deleted in a moment, just return EINVAL */ +- if (test_bit(__I40E_IN_REMOVE, pf->state)) +- return -EINVAL; +- + old_prog = xchg(&vsi->xdp_prog, prog); + + if (need_reset) { +diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c +index f5961bdcc4809..61baf1da76eea 100644 +--- a/drivers/net/ethernet/lantiq_etop.c ++++ b/drivers/net/ethernet/lantiq_etop.c +@@ -217,9 +217,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch) + if (ch->dma.irq) + free_irq(ch->dma.irq, priv); + if (IS_RX(ch->idx)) { +- int desc; ++ struct ltq_dma_channel *dma = &ch->dma; + +- for (desc = 0; desc < LTQ_DESC_NUM; desc++) ++ for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++) + dev_kfree_skb_any(ch->skb[ch->dma.desc]); + } + } +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +index 4c73575fba7be..e883c0929b1a9 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h ++++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +@@ -1661,7 +1661,7 @@ struct cpt_lf_alloc_req_msg { + u16 nix_pf_func; + u16 sso_pf_func; + u16 eng_grpmsk; +- int blkaddr; ++ u8 blkaddr; + u8 ctx_ilen_valid : 1; + u8 ctx_ilen : 7; + }; +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h +index c92c3f4631d54..2c028a81bbc51 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h ++++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h +@@ -63,8 +63,13 @@ enum npc_kpu_lb_ltype { + NPC_LT_LB_CUSTOM1 = 0xF, + }; + ++/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP ++ * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must ++ * differ only at bit 0 so mask 0xE can be used to detect extended headers. 
++ */ + enum npc_kpu_lc_ltype { +- NPC_LT_LC_IP = 1, ++ NPC_LT_LC_PTP = 1, ++ NPC_LT_LC_IP, + NPC_LT_LC_IP_OPT, + NPC_LT_LC_IP6, + NPC_LT_LC_IP6_EXT, +@@ -72,7 +77,6 @@ enum npc_kpu_lc_ltype { + NPC_LT_LC_RARP, + NPC_LT_LC_MPLS, + NPC_LT_LC_NSH, +- NPC_LT_LC_PTP, + NPC_LT_LC_FCOE, + NPC_LT_LC_NGIO, + NPC_LT_LC_CUSTOM0 = 0xE, +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +index 32645aefd5934..5906f5f8d1904 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +@@ -1638,7 +1638,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu, + if (req->ssow > block->lf.max) { + dev_err(&rvu->pdev->dev, + "Func 0x%x: Invalid SSOW req, %d > max %d\n", +- pcifunc, req->sso, block->lf.max); ++ pcifunc, req->ssow, block->lf.max); + return -EINVAL; + } + mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr); +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +index f047185f38e0f..3e09d22858147 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +@@ -696,7 +696,8 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, + struct cpt_rd_wr_reg_msg *req, + struct cpt_rd_wr_reg_msg *rsp) + { +- int blkaddr; ++ u64 offset = req->reg_offset; ++ int blkaddr, lf; + + blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr); + if (blkaddr < 0) +@@ -707,17 +708,25 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu, + !is_cpt_vf(rvu, req->hdr.pcifunc)) + return CPT_AF_ERR_ACCESS_DENIED; + +- rsp->reg_offset = req->reg_offset; +- rsp->ret_val = req->ret_val; +- rsp->is_write = req->is_write; +- + if (!is_valid_offset(rvu, req)) + return CPT_AF_ERR_ACCESS_DENIED; + ++ /* Translate local LF used by VFs to global CPT LF */ ++ lf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], req->hdr.pcifunc, ++ (offset & 0xFFF) >> 3); ++ ++ /* Translate local LF's offset to global CPT LF's offset */ ++ offset &= 0xFF000; ++ offset += lf << 3; ++ ++ rsp->reg_offset = offset; ++ rsp->ret_val = req->ret_val; ++ rsp->is_write = req->is_write; ++ + if (req->is_write) +- rvu_write64(rvu, blkaddr, req->reg_offset, req->val); ++ rvu_write64(rvu, blkaddr, offset, req->val); + else +- rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset); ++ rsp->val = rvu_read64(rvu, blkaddr, offset); + + return 0; + } +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +index f6f6d7c04e8bf..a07e5c8786c4b 100644 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +@@ -3517,6 +3517,11 @@ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) + return -ERANGE; + } + ++/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */ ++#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf) ++/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */ ++#define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf) ++ + static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) + { + int idx, nr_field, key_off, field_marker, keyoff_marker; +@@ -3586,7 +3591,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) + field->hdr_offset = 9; /* offset */ + field->bytesm1 = 0; /* 1 byte */ + field->ltype_match = NPC_LT_LC_IP; +- field->ltype_mask = 0xF; ++ 
field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; + break; + case NIX_FLOW_KEY_TYPE_IPV4: + case NIX_FLOW_KEY_TYPE_INNR_IPV4: +@@ -3613,8 +3618,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) + field->bytesm1 = 3; /* DIP, 4 bytes */ + } + } +- +- field->ltype_mask = 0xF; /* Match only IPv4 */ ++ field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK; + keyoff_marker = false; + break; + case NIX_FLOW_KEY_TYPE_IPV6: +@@ -3643,7 +3647,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) + field->bytesm1 = 15; /* DIP,16 bytes */ + } + } +- field->ltype_mask = 0xF; /* Match only IPv6 */ ++ field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK; + break; + case NIX_FLOW_KEY_TYPE_TCP: + case NIX_FLOW_KEY_TYPE_UDP: +diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c +index 31aebeb2e2858..25989c79c92e6 100644 +--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c ++++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c +@@ -1524,6 +1524,7 @@ static int mtk_star_probe(struct platform_device *pdev) + { + struct device_node *of_node; + struct mtk_star_priv *priv; ++ struct phy_device *phydev; + struct net_device *ndev; + struct device *dev; + void __iomem *base; +@@ -1649,6 +1650,12 @@ static int mtk_star_probe(struct platform_device *pdev) + netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll); + netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll); + ++ phydev = of_phy_find_device(priv->phy_node); ++ if (phydev) { ++ phydev->mac_managed_pm = true; ++ put_device(&phydev->mdio.dev); ++ } ++ + return devm_register_netdev(dev, ndev); + } + +diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c +index 6453c92f0fa7c..7fa1820db9cce 100644 +--- a/drivers/net/ethernet/micrel/ks8851_common.c ++++ b/drivers/net/ethernet/micrel/ks8851_common.c +@@ -352,11 +352,11 @@ static irqreturn_t ks8851_irq(int irq, void *_ks) + netif_dbg(ks, intr, ks->netdev, + "%s: txspace %d\n", __func__, tx_space); + +- spin_lock(&ks->statelock); ++ spin_lock_bh(&ks->statelock); + ks->tx_space = tx_space; + if (netif_queue_stopped(ks->netdev)) + netif_wake_queue(ks->netdev); +- spin_unlock(&ks->statelock); ++ spin_unlock_bh(&ks->statelock); + } + + if (status & IRQ_SPIBEI) { +@@ -482,6 +482,7 @@ static int ks8851_net_open(struct net_device *dev) + ks8851_wrreg16(ks, KS_IER, ks->rc_ier); + + ks->queued_len = 0; ++ ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); + netif_start_queue(ks->netdev); + + netif_dbg(ks, ifup, ks->netdev, "network device up\n"); +@@ -635,14 +636,14 @@ static void ks8851_set_rx_mode(struct net_device *dev) + + /* schedule work to do the actual set of the data if needed */ + +- spin_lock(&ks->statelock); ++ spin_lock_bh(&ks->statelock); + + if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) { + memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl)); + schedule_work(&ks->rxctrl_work); + } + +- spin_unlock(&ks->statelock); ++ spin_unlock_bh(&ks->statelock); + } + + static int ks8851_set_mac_address(struct net_device *dev, void *addr) +@@ -1101,7 +1102,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev, + int ret; + + ks->netdev = netdev; +- ks->tx_space = 6144; + + ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + ret = PTR_ERR_OR_ZERO(ks->gpio); +diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c +index 4dcbff789b19d..e33a5e7beb39e 100644 +--- a/drivers/net/ethernet/micrel/ks8851_spi.c ++++ 
b/drivers/net/ethernet/micrel/ks8851_spi.c +@@ -340,10 +340,10 @@ static void ks8851_tx_work(struct work_struct *work) + + tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR); + +- spin_lock(&ks->statelock); ++ spin_lock_bh(&ks->statelock); + ks->queued_len -= dequeued_len; + ks->tx_space = tx_space; +- spin_unlock(&ks->statelock); ++ spin_unlock_bh(&ks->statelock); + + ks8851_unlock_spi(ks, &flags); + } +diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c +index a838b61cd844b..a35528497a576 100644 +--- a/drivers/net/phy/microchip_t1.c ++++ b/drivers/net/phy/microchip_t1.c +@@ -748,7 +748,7 @@ static int lan87xx_cable_test_report(struct phy_device *phydev) + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, + lan87xx_cable_test_report_trans(detect)); + +- return 0; ++ return phy_init_hw(phydev); + } + + static int lan87xx_cable_test_get_status(struct phy_device *phydev, +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c +index a9beacd552cf8..370c84f393759 100644 +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -70,6 +70,7 @@ + #define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */ + + #define PPP_PROTO_LEN 2 ++#define PPP_LCP_HDRLEN 4 + + /* + * An instance of /dev/ppp can be associated with either a ppp +@@ -491,6 +492,15 @@ static ssize_t ppp_read(struct file *file, char __user *buf, + return ret; + } + ++static bool ppp_check_packet(struct sk_buff *skb, size_t count) ++{ ++ /* LCP packets must include LCP header which 4 bytes long: ++ * 1-byte code, 1-byte identifier, and 2-byte length. ++ */ ++ return get_unaligned_be16(skb->data) != PPP_LCP || ++ count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN; ++} ++ + static ssize_t ppp_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) + { +@@ -513,6 +523,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf, + kfree_skb(skb); + goto out; + } ++ ret = -EINVAL; ++ if (unlikely(!ppp_check_packet(skb, count))) { ++ kfree_skb(skb); ++ goto out; ++ } + + switch (pf->kind) { + case INTERFACE: +diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c +index 0ba714ca5185c..4b8528206cc8a 100644 +--- a/drivers/net/wireguard/allowedips.c ++++ b/drivers/net/wireguard/allowedips.c +@@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u8 *src, u8 bits) + if (bits == 32) { + *(u32 *)dst = be32_to_cpu(*(const __be32 *)src); + } else if (bits == 128) { +- ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]); +- ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]); ++ ((u64 *)dst)[0] = get_unaligned_be64(src); ++ ((u64 *)dst)[1] = get_unaligned_be64(src + 8); + } + } + +diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h +index 1ea4f874e367e..7eb76724b3edb 100644 +--- a/drivers/net/wireguard/queueing.h ++++ b/drivers/net/wireguard/queueing.h +@@ -124,10 +124,10 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id) + */ + static inline int wg_cpumask_next_online(int *last_cpu) + { +- int cpu = cpumask_next(*last_cpu, cpu_online_mask); ++ int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask); + if (cpu >= nr_cpu_ids) + cpu = cpumask_first(cpu_online_mask); +- *last_cpu = cpu; ++ WRITE_ONCE(*last_cpu, cpu); + return cpu; + } + +diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c +index 0d48e0f4a1ba3..26e09c30d596c 100644 +--- a/drivers/net/wireguard/send.c ++++ b/drivers/net/wireguard/send.c +@@ -222,7 +222,7 @@ void 
wg_packet_send_keepalive(struct wg_peer *peer) + { + struct sk_buff *skb; + +- if (skb_queue_empty(&peer->staged_packet_queue)) { ++ if (skb_queue_empty_lockless(&peer->staged_packet_queue)) { + skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH, + GFP_ATOMIC); + if (unlikely(!skb)) +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c +index e26f79a132bb5..040dfa01fa12e 100644 +--- a/drivers/nvmem/core.c ++++ b/drivers/nvmem/core.c +@@ -374,10 +374,9 @@ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, + if (!config->base_dev) + return -EINVAL; + +- if (config->type == NVMEM_TYPE_FRAM) +- bin_attr_nvmem_eeprom_compat.attr.name = "fram"; +- + nvmem->eeprom = bin_attr_nvmem_eeprom_compat; ++ if (config->type == NVMEM_TYPE_FRAM) ++ nvmem->eeprom.attr.name = "fram"; + nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem); + nvmem->eeprom.size = nvmem->size; + #ifdef CONFIG_DEBUG_LOCK_ALLOC +diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c +index 33678d0af2c24..6c2f80e166e28 100644 +--- a/drivers/nvmem/meson-efuse.c ++++ b/drivers/nvmem/meson-efuse.c +@@ -18,18 +18,24 @@ static int meson_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) + { + struct meson_sm_firmware *fw = context; ++ int ret; + +- return meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset, +- bytes, 0, 0, 0); ++ ret = meson_sm_call_read(fw, (u8 *)val, bytes, SM_EFUSE_READ, offset, ++ bytes, 0, 0, 0); ++ ++ return ret < 0 ? ret : 0; + } + + static int meson_efuse_write(void *context, unsigned int offset, + void *val, size_t bytes) + { + struct meson_sm_firmware *fw = context; ++ int ret; ++ ++ ret = meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset, ++ bytes, 0, 0, 0); + +- return meson_sm_call_write(fw, (u8 *)val, bytes, SM_EFUSE_WRITE, offset, +- bytes, 0, 0, 0); ++ return ret < 0 ? ret : 0; + } + + static const struct of_device_id meson_efuse_match[] = { +diff --git a/drivers/nvmem/rmem.c b/drivers/nvmem/rmem.c +index 752d0bf4445ee..7f907c5a445e7 100644 +--- a/drivers/nvmem/rmem.c ++++ b/drivers/nvmem/rmem.c +@@ -46,7 +46,10 @@ static int rmem_read(void *context, unsigned int offset, + + memunmap(addr); + +- return count; ++ if (count < 0) ++ return count; ++ ++ return count == bytes ? 
0 : -EIO; + } + + static int rmem_probe(struct platform_device *pdev) +diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c +index 05d8a442a18a9..26991b2f7ae91 100644 +--- a/drivers/platform/x86/toshiba_acpi.c ++++ b/drivers/platform/x86/toshiba_acpi.c +@@ -3303,6 +3303,7 @@ static const struct dmi_system_id toshiba_dmi_quirks[] __initconst = { + }, + .driver_data = (void *)(QUIRK_TURN_ON_PANEL_ON_RESUME | QUIRK_HCI_HOTKEY_QUICKSTART), + }, ++ { } + }; + + static int toshiba_acpi_add(struct acpi_device *acpi_dev) +diff --git a/drivers/pmdomain/qcom/rpmhpd.c b/drivers/pmdomain/qcom/rpmhpd.c +index efca30c185a15..1bb9f70ab04c8 100644 +--- a/drivers/pmdomain/qcom/rpmhpd.c ++++ b/drivers/pmdomain/qcom/rpmhpd.c +@@ -40,6 +40,7 @@ + * @addr: Resource address as looped up using resource name from + * cmd-db + * @state_synced: Indicator that sync_state has been invoked for the rpmhpd resource ++ * @skip_retention_level: Indicate that retention level should not be used for the power domain + */ + struct rpmhpd { + struct device *dev; +@@ -56,6 +57,7 @@ struct rpmhpd { + const char *res_name; + u32 addr; + bool state_synced; ++ bool skip_retention_level; + }; + + struct rpmhpd_desc { +@@ -173,6 +175,7 @@ static struct rpmhpd mxc = { + .pd = { .name = "mxc", }, + .peer = &mxc_ao, + .res_name = "mxc.lvl", ++ .skip_retention_level = true, + }; + + static struct rpmhpd mxc_ao = { +@@ -180,6 +183,7 @@ static struct rpmhpd mxc_ao = { + .active_only = true, + .peer = &mxc, + .res_name = "mxc.lvl", ++ .skip_retention_level = true, + }; + + static struct rpmhpd nsp = { +@@ -749,6 +753,9 @@ static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd) + return -EINVAL; + + for (i = 0; i < rpmhpd->level_count; i++) { ++ if (rpmhpd->skip_retention_level && buf[i] == RPMH_REGULATOR_LEVEL_RETENTION) ++ continue; ++ + rpmhpd->level[i] = buf[i]; + + /* Remember the first corner with non-zero level */ +diff --git a/drivers/tty/serial/ma35d1_serial.c b/drivers/tty/serial/ma35d1_serial.c +index 69da24565b997..99225f1e02ac8 100644 +--- a/drivers/tty/serial/ma35d1_serial.c ++++ b/drivers/tty/serial/ma35d1_serial.c +@@ -688,12 +688,13 @@ static int ma35d1serial_probe(struct platform_device *pdev) + struct uart_ma35d1_port *up; + int ret = 0; + +- if (pdev->dev.of_node) { +- ret = of_alias_get_id(pdev->dev.of_node, "serial"); +- if (ret < 0) { +- dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret); +- return ret; +- } ++ if (!pdev->dev.of_node) ++ return -ENODEV; ++ ++ ret = of_alias_get_id(pdev->dev.of_node, "serial"); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n", ret); ++ return ret; + } + up = &ma35d1serial_ports[ret]; + up->port.line = ret; +diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c +index 4e84ee6564d4b..a10fc7a697109 100644 +--- a/drivers/ufs/core/ufs-mcq.c ++++ b/drivers/ufs/core/ufs-mcq.c +@@ -105,16 +105,15 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac); + * @hba: per adapter instance + * @req: pointer to the request to be issued + * +- * Return: the hardware queue instance on which the request would +- * be queued. ++ * Return: the hardware queue instance on which the request will be or has ++ * been queued. %NULL if the request has already been freed. 
+ */ + struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, + struct request *req) + { +- u32 utag = blk_mq_unique_tag(req); +- u32 hwq = blk_mq_unique_tag_to_hwq(utag); ++ struct blk_mq_hw_ctx *hctx = READ_ONCE(req->mq_hctx); + +- return &hba->uhq[hwq]; ++ return hctx ? &hba->uhq[hctx->queue_num] : NULL; + } + + /** +@@ -511,6 +510,8 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag) + if (!cmd) + return -EINVAL; + hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd)); ++ if (!hwq) ++ return 0; + } else { + hwq = hba->dev_cmd_queue; + } +diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c +index 7bb820bfd7437..808979a093505 100644 +--- a/drivers/ufs/core/ufshcd.c ++++ b/drivers/ufs/core/ufshcd.c +@@ -6363,6 +6363,8 @@ static bool ufshcd_abort_one(struct request *rq, void *priv) + /* Release cmd in MCQ mode if abort succeeds */ + if (is_mcq_enabled(hba) && (*ret == 0)) { + hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd)); ++ if (!hwq) ++ return 0; + spin_lock_irqsave(&hwq->cq_lock, flags); + if (ufshcd_cmd_inflight(lrbp->cmd)) + ufshcd_release_scsi_cmd(hba, lrbp); +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c +index 7f8d33f92ddb5..847dd32c0f5e2 100644 +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -291,6 +291,20 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, + if (ifp->desc.bNumEndpoints >= num_ep) + goto skip_to_next_endpoint_or_interface_descriptor; + ++ /* Save a copy of the descriptor and use it instead of the original */ ++ endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; ++ memcpy(&endpoint->desc, d, n); ++ d = &endpoint->desc; ++ ++ /* Clear the reserved bits in bEndpointAddress */ ++ i = d->bEndpointAddress & ++ (USB_ENDPOINT_DIR_MASK | USB_ENDPOINT_NUMBER_MASK); ++ if (i != d->bEndpointAddress) { ++ dev_notice(ddev, "config %d interface %d altsetting %d has an endpoint descriptor with address 0x%X, changing to 0x%X\n", ++ cfgno, inum, asnum, d->bEndpointAddress, i); ++ endpoint->desc.bEndpointAddress = i; ++ } ++ + /* Check for duplicate endpoint addresses */ + if (config_endpoint_is_duplicate(config, inum, asnum, d)) { + dev_notice(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", +@@ -308,10 +322,8 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, + } + } + +- endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; ++ /* Accept this endpoint */ + ++ifp->desc.bNumEndpoints; +- +- memcpy(&endpoint->desc, d, n); + INIT_LIST_HEAD(&endpoint->urb_list); + + /* +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index b4783574b8e66..13171454f9591 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -506,6 +506,9 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x1b1c, 0x1b38), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, + ++ /* START BP-850k Printer */ ++ { USB_DEVICE(0x1bc3, 0x0003), .driver_info = USB_QUIRK_NO_SET_INTF }, ++ + /* MIDI keyboard WORLDE MINI */ + { USB_DEVICE(0x1c75, 0x0204), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c +index 9ef821ca2fc71..052852f801467 100644 +--- a/drivers/usb/dwc3/dwc3-pci.c ++++ b/drivers/usb/dwc3/dwc3-pci.c +@@ -54,6 +54,10 @@ + #define PCI_DEVICE_ID_INTEL_MTL 0x7e7e + #define PCI_DEVICE_ID_INTEL_ARLH_PCH 0x777e + #define PCI_DEVICE_ID_INTEL_TGL 0x9a15 ++#define PCI_DEVICE_ID_INTEL_PTLH 0xe332 ++#define 
PCI_DEVICE_ID_INTEL_PTLH_PCH 0xe37e ++#define PCI_DEVICE_ID_INTEL_PTLU 0xe432 ++#define PCI_DEVICE_ID_INTEL_PTLU_PCH 0xe47e + #define PCI_DEVICE_ID_AMD_MR 0x163a + + #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" +@@ -430,6 +434,10 @@ static const struct pci_device_id dwc3_pci_id_table[] = { + { PCI_DEVICE_DATA(INTEL, MTLS, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, ARLH_PCH, &dwc3_pci_intel_swnode) }, + { PCI_DEVICE_DATA(INTEL, TGL, &dwc3_pci_intel_swnode) }, ++ { PCI_DEVICE_DATA(INTEL, PTLH, &dwc3_pci_intel_swnode) }, ++ { PCI_DEVICE_DATA(INTEL, PTLH_PCH, &dwc3_pci_intel_swnode) }, ++ { PCI_DEVICE_DATA(INTEL, PTLU, &dwc3_pci_intel_swnode) }, ++ { PCI_DEVICE_DATA(INTEL, PTLU_PCH, &dwc3_pci_intel_swnode) }, + + { PCI_DEVICE_DATA(AMD, NL_USB, &dwc3_pci_amd_swnode) }, + { PCI_DEVICE_DATA(AMD, MR, &dwc3_pci_amd_mr_swnode) }, +diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c +index 4c639e9ddedc0..60a1abfc56547 100644 +--- a/drivers/usb/gadget/configfs.c ++++ b/drivers/usb/gadget/configfs.c +@@ -115,9 +115,12 @@ static int usb_string_copy(const char *s, char **s_copy) + int ret; + char *str; + char *copy = *s_copy; ++ + ret = strlen(s); + if (ret > USB_MAX_STRING_LEN) + return -EOVERFLOW; ++ if (ret < 1) ++ return -EINVAL; + + if (copy) { + str = copy; +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 573b5784d1c3d..3c9b4ae3d33b3 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1084,10 +1084,20 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) + xhci_dbg(xhci, "Start the secondary HCD\n"); + retval = xhci_run(xhci->shared_hcd); + } +- ++ if (retval) ++ return retval; ++ /* ++ * Resume roothubs unconditionally as PORTSC change bits are not ++ * immediately visible after xHC reset ++ */ + hcd->state = HC_STATE_SUSPENDED; +- if (xhci->shared_hcd) ++ ++ if (xhci->shared_hcd) { + xhci->shared_hcd->state = HC_STATE_SUSPENDED; ++ usb_hcd_resume_root_hub(xhci->shared_hcd); ++ } ++ usb_hcd_resume_root_hub(hcd); ++ + goto done; + } + +@@ -1111,7 +1121,6 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) + + xhci_dbc_resume(xhci); + +- done: + if (retval == 0) { + /* + * Resume roothubs only if there are pending events. +@@ -1137,6 +1146,7 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg) + usb_hcd_resume_root_hub(hcd); + } + } ++done: + /* + * If system is subject to the Quirk, Compliance Mode Timer needs to + * be re-initialized Always after a system resume. 
Ports are subject +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c +index 8b0308d84270f..85697466b1476 100644 +--- a/drivers/usb/serial/mos7840.c ++++ b/drivers/usb/serial/mos7840.c +@@ -1737,6 +1737,49 @@ static void mos7840_port_remove(struct usb_serial_port *port) + kfree(mos7840_port); + } + ++static int mos7840_suspend(struct usb_serial *serial, pm_message_t message) ++{ ++ struct moschip_port *mos7840_port; ++ struct usb_serial_port *port; ++ int i; ++ ++ for (i = 0; i < serial->num_ports; ++i) { ++ port = serial->port[i]; ++ if (!tty_port_initialized(&port->port)) ++ continue; ++ ++ mos7840_port = usb_get_serial_port_data(port); ++ ++ usb_kill_urb(mos7840_port->read_urb); ++ mos7840_port->read_urb_busy = false; ++ } ++ ++ return 0; ++} ++ ++static int mos7840_resume(struct usb_serial *serial) ++{ ++ struct moschip_port *mos7840_port; ++ struct usb_serial_port *port; ++ int res; ++ int i; ++ ++ for (i = 0; i < serial->num_ports; ++i) { ++ port = serial->port[i]; ++ if (!tty_port_initialized(&port->port)) ++ continue; ++ ++ mos7840_port = usb_get_serial_port_data(port); ++ ++ mos7840_port->read_urb_busy = true; ++ res = usb_submit_urb(mos7840_port->read_urb, GFP_NOIO); ++ if (res) ++ mos7840_port->read_urb_busy = false; ++ } ++ ++ return 0; ++} ++ + static struct usb_serial_driver moschip7840_4port_device = { + .driver = { + .owner = THIS_MODULE, +@@ -1764,6 +1807,8 @@ static struct usb_serial_driver moschip7840_4port_device = { + .port_probe = mos7840_port_probe, + .port_remove = mos7840_port_remove, + .read_bulk_callback = mos7840_bulk_in_callback, ++ .suspend = mos7840_suspend, ++ .resume = mos7840_resume, + }; + + static struct usb_serial_driver * const serial_drivers[] = { +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 8a5846d4adf67..311040f9b9352 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -1425,6 +1425,10 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(0) | RSVD(1) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */ + .driver_info = NCTRL(0) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3000, 0xff), /* Telit FN912 */ ++ .driver_info = RSVD(0) | NCTRL(3) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x3001, 0xff), /* Telit FN912 */ ++ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7010, 0xff), /* Telit LE910-S1 (RNDIS) */ + .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x7011, 0xff), /* Telit LE910-S1 (ECM) */ +@@ -1433,6 +1437,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = NCTRL(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x701b, 0xff), /* Telit LE910R1 (ECM) */ + .driver_info = NCTRL(2) }, ++ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x9000, 0xff), /* Telit generic core-dump device */ ++ .driver_info = NCTRL(0) }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9010), /* Telit SBL FN980 flashing device */ + .driver_info = NCTRL(0) | ZLP }, + { USB_DEVICE(TELIT_VENDOR_ID, 0x9200), /* Telit LE910S1 flashing device */ +@@ -2224,6 +2230,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_7106_2COM, 0x02, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, 
++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7126, 0xff, 0x00, 0x00), ++ .driver_info = NCTRL(2) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x7127, 0xff, 0x00, 0x00), ++ .driver_info = NCTRL(2) | NCTRL(3) | NCTRL(4) }, + { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, + { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MPL200), + .driver_info = RSVD(1) | RSVD(4) }, +@@ -2284,6 +2294,8 @@ static const struct usb_device_id option_ids[] = { + .driver_info = RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0f0, 0xff), /* Foxconn T99W373 MBIM */ + .driver_info = RSVD(3) }, ++ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff), /* Foxconn T99W651 RNDIS */ ++ .driver_info = RSVD(5) | RSVD(6) }, + { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */ + .driver_info = RSVD(4) | RSVD(5) | RSVD(6) }, + { USB_DEVICE(0x1782, 0x4d10) }, /* Fibocom L610 (AT mode) */ +@@ -2321,6 +2333,32 @@ static const struct usb_device_id option_ids[] = { + .driver_info = RSVD(4) }, + { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff), /* Rolling RW135-GL (laptop MBIM) */ + .driver_info = RSVD(5) }, ++ { USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0802, 0xff), /* Rolling RW350-GL (laptop MBIM) */ ++ .driver_info = RSVD(5) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Global */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0x00, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0100, 0xff, 0xff, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for Global SKU */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0x00, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0101, 0xff, 0xff, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WRD for China SKU */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0x00, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0106, 0xff, 0xff, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for SA */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0x00, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0111, 0xff, 0xff, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for EU */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0x00, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0112, 0xff, 0xff, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for NA */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0x00, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0113, 0xff, 0xff, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for China EDU */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0x00, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0115, 0xff, 0xff, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x30) }, /* NetPrisma LCUK54-WWD for Golbal EDU */ ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0x00, 0x40) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(0x3731, 0x0116, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 
0xff, 0xff, 0x40) }, +diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c +index a3c545dd174ee..a8f259bc2f4d0 100644 +--- a/drivers/vfio/pci/vfio_pci_core.c ++++ b/drivers/vfio/pci/vfio_pci_core.c +@@ -1260,7 +1260,7 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info( + struct vfio_pci_hot_reset_info hdr; + struct vfio_pci_fill_info fill = {}; + bool slot = false; +- int ret, count; ++ int ret, count = 0; + + if (copy_from_user(&hdr, arg, minsz)) + return -EFAULT; +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c +index cc6bc5985120d..5d6cfa618dc47 100644 +--- a/fs/btrfs/tree-checker.c ++++ b/fs/btrfs/tree-checker.c +@@ -29,6 +29,7 @@ + #include "accessors.h" + #include "file-item.h" + #include "inode-item.h" ++#include "extent-tree.h" + + /* + * Error message should follow the following format: +@@ -1274,6 +1275,8 @@ static int check_extent_item(struct extent_buffer *leaf, + unsigned long ptr; /* Current pointer inside inline refs */ + unsigned long end; /* Extent item end */ + const u32 item_size = btrfs_item_size(leaf, slot); ++ u8 last_type = 0; ++ u64 last_seq = U64_MAX; + u64 flags; + u64 generation; + u64 total_refs; /* Total refs in btrfs_extent_item */ +@@ -1320,6 +1323,18 @@ static int check_extent_item(struct extent_buffer *leaf, + * 2.2) Ref type specific data + * Either using btrfs_extent_inline_ref::offset, or specific + * data structure. ++ * ++ * All above inline items should follow the order: ++ * ++ * - All btrfs_extent_inline_ref::type should be in an ascending ++ * order ++ * ++ * - Within the same type, the items should follow a descending ++ * order by their sequence number. The sequence number is ++ * determined by: ++ * * btrfs_extent_inline_ref::offset for all types other than ++ * EXTENT_DATA_REF ++ * * hash_extent_data_ref() for EXTENT_DATA_REF + */ + if (unlikely(item_size < sizeof(*ei))) { + extent_err(leaf, slot, +@@ -1401,6 +1416,7 @@ static int check_extent_item(struct extent_buffer *leaf, + struct btrfs_extent_inline_ref *iref; + struct btrfs_extent_data_ref *dref; + struct btrfs_shared_data_ref *sref; ++ u64 seq; + u64 dref_offset; + u64 inline_offset; + u8 inline_type; +@@ -1414,6 +1430,7 @@ static int check_extent_item(struct extent_buffer *leaf, + iref = (struct btrfs_extent_inline_ref *)ptr; + inline_type = btrfs_extent_inline_ref_type(leaf, iref); + inline_offset = btrfs_extent_inline_ref_offset(leaf, iref); ++ seq = inline_offset; + if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) { + extent_err(leaf, slot, + "inline ref item overflows extent item, ptr %lu iref size %u end %lu", +@@ -1444,6 +1461,10 @@ static int check_extent_item(struct extent_buffer *leaf, + case BTRFS_EXTENT_DATA_REF_KEY: + dref = (struct btrfs_extent_data_ref *)(&iref->offset); + dref_offset = btrfs_extent_data_ref_offset(leaf, dref); ++ seq = hash_extent_data_ref( ++ btrfs_extent_data_ref_root(leaf, dref), ++ btrfs_extent_data_ref_objectid(leaf, dref), ++ btrfs_extent_data_ref_offset(leaf, dref)); + if (unlikely(!IS_ALIGNED(dref_offset, + fs_info->sectorsize))) { + extent_err(leaf, slot, +@@ -1470,6 +1491,24 @@ static int check_extent_item(struct extent_buffer *leaf, + inline_type); + return -EUCLEAN; + } ++ if (inline_type < last_type) { ++ extent_err(leaf, slot, ++ "inline ref out-of-order: has type %u, prev type %u", ++ inline_type, last_type); ++ return -EUCLEAN; ++ } ++ /* Type changed, allow the sequence starts from U64_MAX again. 
*/ ++ if (inline_type > last_type) ++ last_seq = U64_MAX; ++ if (seq > last_seq) { ++ extent_err(leaf, slot, ++"inline ref out-of-order: has type %u offset %llu seq 0x%llx, prev type %u seq 0x%llx", ++ inline_type, inline_offset, seq, ++ last_type, last_seq); ++ return -EUCLEAN; ++ } ++ last_type = inline_type; ++ last_seq = seq; + ptr += btrfs_extent_inline_ref_size(inline_type); + } + /* No padding is allowed */ +diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c +index b9945e4f697be..89b11336a8369 100644 +--- a/fs/cachefiles/daemon.c ++++ b/fs/cachefiles/daemon.c +@@ -357,14 +357,24 @@ static __poll_t cachefiles_daemon_poll(struct file *file, + struct poll_table_struct *poll) + { + struct cachefiles_cache *cache = file->private_data; ++ XA_STATE(xas, &cache->reqs, 0); ++ struct cachefiles_req *req; + __poll_t mask; + + poll_wait(file, &cache->daemon_pollwq, poll); + mask = 0; + + if (cachefiles_in_ondemand_mode(cache)) { +- if (!xa_empty(&cache->reqs)) +- mask |= EPOLLIN; ++ if (!xa_empty(&cache->reqs)) { ++ xas_lock(&xas); ++ xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) { ++ if (!cachefiles_ondemand_is_reopening_read(req)) { ++ mask |= EPOLLIN; ++ break; ++ } ++ } ++ xas_unlock(&xas); ++ } + } else { + if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags)) + mask |= EPOLLIN; +diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h +index 3eea52462fc87..111ad6ecd4baf 100644 +--- a/fs/cachefiles/internal.h ++++ b/fs/cachefiles/internal.h +@@ -48,6 +48,7 @@ enum cachefiles_object_state { + CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */ + CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */ + CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */ ++ CACHEFILES_ONDEMAND_OBJSTATE_DROPPING, /* Object is being dropped. 
*/ + }; + + struct cachefiles_ondemand_info { +@@ -128,6 +129,7 @@ struct cachefiles_cache { + unsigned long req_id_next; + struct xarray ondemand_ids; /* xarray for ondemand_id allocation */ + u32 ondemand_id_next; ++ u32 msg_id_next; + }; + + static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache) +@@ -335,6 +337,14 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \ + CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN); + CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE); + CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING); ++CACHEFILES_OBJECT_STATE_FUNCS(dropping, DROPPING); ++ ++static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req) ++{ ++ return cachefiles_ondemand_object_is_reopening(req->object) && ++ req->msg.opcode == CACHEFILES_OP_READ; ++} ++ + #else + static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, + char __user *_buffer, size_t buflen) +@@ -365,6 +375,11 @@ static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *ob + static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj) + { + } ++ ++static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req) ++{ ++ return false; ++} + #endif + + /* +diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c +index 4b39f0422e590..51173ab6dbd84 100644 +--- a/fs/cachefiles/ondemand.c ++++ b/fs/cachefiles/ondemand.c +@@ -494,7 +494,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, + */ + xas_lock(&xas); + +- if (test_bit(CACHEFILES_DEAD, &cache->flags)) { ++ if (test_bit(CACHEFILES_DEAD, &cache->flags) || ++ cachefiles_ondemand_object_is_dropping(object)) { + xas_unlock(&xas); + ret = -EIO; + goto out; +@@ -504,20 +505,32 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, + smp_mb(); + + if (opcode == CACHEFILES_OP_CLOSE && +- !cachefiles_ondemand_object_is_open(object)) { ++ !cachefiles_ondemand_object_is_open(object)) { + WARN_ON_ONCE(object->ondemand->ondemand_id == 0); + xas_unlock(&xas); + ret = -EIO; + goto out; + } + +- xas.xa_index = 0; ++ /* ++ * Cyclically find a free xas to avoid msg_id reuse that would ++ * cause the daemon to successfully copen a stale msg_id. ++ */ ++ xas.xa_index = cache->msg_id_next; + xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK); ++ if (xas.xa_node == XAS_RESTART) { ++ xas.xa_index = 0; ++ xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK); ++ } + if (xas.xa_node == XAS_RESTART) + xas_set_err(&xas, -EBUSY); ++ + xas_store(&xas, req); +- xas_clear_mark(&xas, XA_FREE_MARK); +- xas_set_mark(&xas, CACHEFILES_REQ_NEW); ++ if (xas_valid(&xas)) { ++ cache->msg_id_next = xas.xa_index + 1; ++ xas_clear_mark(&xas, XA_FREE_MARK); ++ xas_set_mark(&xas, CACHEFILES_REQ_NEW); ++ } + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + +@@ -535,7 +548,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, + * If error occurs after creating the anonymous fd, + * cachefiles_ondemand_fd_release() will set object to close. 
+ */ +- if (opcode == CACHEFILES_OP_OPEN) ++ if (opcode == CACHEFILES_OP_OPEN && ++ !cachefiles_ondemand_object_is_dropping(object)) + cachefiles_ondemand_set_object_close(object); + kfree(req); + return ret; +@@ -634,8 +648,34 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object) + + void cachefiles_ondemand_clean_object(struct cachefiles_object *object) + { ++ unsigned long index; ++ struct cachefiles_req *req; ++ struct cachefiles_cache *cache; ++ ++ if (!object->ondemand) ++ return; ++ + cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0, + cachefiles_ondemand_init_close_req, NULL); ++ ++ if (!object->ondemand->ondemand_id) ++ return; ++ ++ /* Cancel all requests for the object that is being dropped. */ ++ cache = object->volume->cache; ++ xa_lock(&cache->reqs); ++ cachefiles_ondemand_set_object_dropping(object); ++ xa_for_each(&cache->reqs, index, req) { ++ if (req->object == object) { ++ req->error = -EIO; ++ complete(&req->done); ++ __xa_erase(&cache->reqs, index); ++ } ++ } ++ xa_unlock(&cache->reqs); ++ ++ /* Wait for ondemand_object_worker() to finish to avoid UAF. */ ++ cancel_work_sync(&object->ondemand->ondemand_work); + } + + int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object, +diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c +index bcb6173943ee4..4dd8a993c60a8 100644 +--- a/fs/cachefiles/xattr.c ++++ b/fs/cachefiles/xattr.c +@@ -110,9 +110,11 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file + if (xlen == 0) + xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, tlen); + if (xlen != tlen) { +- if (xlen < 0) ++ if (xlen < 0) { ++ ret = xlen; + trace_cachefiles_vfs_error(object, file_inode(file), xlen, + cachefiles_trace_getxattr_error); ++ } + if (xlen == -EIO) + cachefiles_io_error_obj( + object, +@@ -252,6 +254,7 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume) + xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, len); + if (xlen != len) { + if (xlen < 0) { ++ ret = xlen; + trace_cachefiles_vfs_error(NULL, d_inode(dentry), xlen, + cachefiles_trace_getxattr_error); + if (xlen == -EIO) +diff --git a/fs/dcache.c b/fs/dcache.c +index 9ae808fba517b..186c0e1b5713f 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -356,7 +356,11 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry) + flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); + WRITE_ONCE(dentry->d_flags, flags); + dentry->d_inode = NULL; +- if (dentry->d_flags & DCACHE_LRU_LIST) ++ /* ++ * The negative counter only tracks dentries on the LRU. Don't inc if ++ * d_lru is on another list. ++ */ ++ if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST) + this_cpu_inc(nr_dentry_negative); + } + +@@ -2000,9 +2004,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) + + spin_lock(&dentry->d_lock); + /* +- * Decrement negative dentry count if it was in the LRU list. ++ * The negative counter only tracks dentries on the LRU. Don't dec if ++ * d_lru is on another list. 
+ */ +- if (dentry->d_flags & DCACHE_LRU_LIST) ++ if ((dentry->d_flags & ++ (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST) + this_cpu_dec(nr_dentry_negative); + hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); + raw_write_seqcount_begin(&dentry->d_seq); +diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c +index 63cbda3700ea9..d65dccb44ed59 100644 +--- a/fs/ext4/sysfs.c ++++ b/fs/ext4/sysfs.c +@@ -473,6 +473,8 @@ static ssize_t ext4_attr_store(struct kobject *kobj, + *((unsigned int *) ptr) = t; + return len; + case attr_clusters_in_group: ++ if (!ptr) ++ return 0; + ret = kstrtouint(skip_spaces(buf), 0, &t); + if (ret) + return ret; +diff --git a/fs/locks.c b/fs/locks.c +index 76ad05f8070ad..fb717dae90297 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -1314,9 +1314,9 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, + locks_wake_up_blocks(left); + } + out: ++ trace_posix_lock_inode(inode, request, error); + spin_unlock(&ctx->flc_lock); + percpu_up_read(&file_rwsem); +- trace_posix_lock_inode(inode, request, error); + /* + * Free any unused locks. + */ +diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c +index 51c982ad96086..53e4e63c607e7 100644 +--- a/fs/nilfs2/dir.c ++++ b/fs/nilfs2/dir.c +@@ -396,11 +396,39 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr, + + struct nilfs_dir_entry *nilfs_dotdot(struct inode *dir, struct page **p) + { +- struct nilfs_dir_entry *de = nilfs_get_page(dir, 0, p); ++ struct page *page; ++ struct nilfs_dir_entry *de, *next_de; ++ size_t limit; ++ char *msg; + ++ de = nilfs_get_page(dir, 0, &page); + if (IS_ERR(de)) + return NULL; +- return nilfs_next_entry(de); ++ ++ limit = nilfs_last_byte(dir, 0); /* is a multiple of chunk size */ ++ if (unlikely(!limit || le64_to_cpu(de->inode) != dir->i_ino || ++ !nilfs_match(1, ".", de))) { ++ msg = "missing '.'"; ++ goto fail; ++ } ++ ++ next_de = nilfs_next_entry(de); ++ /* ++ * If "next_de" has not reached the end of the chunk, there is ++ * at least one more record. Check whether it matches "..". 
++ */ ++ if (unlikely((char *)next_de == (char *)de + nilfs_chunk_size(dir) || ++ !nilfs_match(2, "..", next_de))) { ++ msg = "missing '..'"; ++ goto fail; ++ } ++ *p = page; ++ return next_de; ++ ++fail: ++ nilfs_error(dir->i_sb, "directory #%lu %s", dir->i_ino, msg); ++ nilfs_put_page(page); ++ return NULL; + } + + ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr) +diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h +index c146f83eba9b4..53e00255d96b6 100644 +--- a/fs/smb/client/cifsglob.h ++++ b/fs/smb/client/cifsglob.h +@@ -1939,8 +1939,8 @@ require use of the stronger protocol */ + #define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */ + #define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */ + +-#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP) +-#define CIFSSEC_MAX (CIFSSEC_MUST_NTLMV2) ++#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL) ++#define CIFSSEC_MAX (CIFSSEC_MAY_SIGN | CIFSSEC_MUST_KRB5 | CIFSSEC_MAY_SEAL) + #define CIFSSEC_AUTH_MASK (CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP) + /* + ***************************************************************** +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index 6397f77b6750c..74e1971dc4851 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -2051,15 +2051,22 @@ int smb2_tree_connect(struct ksmbd_work *work) + * @access: file access flags + * @disposition: file disposition flags + * @may_flags: set with MAY_ flags ++ * @is_dir: is creating open flags for directory + * + * Return: file open flags + */ + static int smb2_create_open_flags(bool file_present, __le32 access, + __le32 disposition, +- int *may_flags) ++ int *may_flags, ++ bool is_dir) + { + int oflags = O_NONBLOCK | O_LARGEFILE; + ++ if (is_dir) { ++ access &= ~FILE_WRITE_DESIRE_ACCESS_LE; ++ ksmbd_debug(SMB, "Discard write access to a directory\n"); ++ } ++ + if (access & FILE_READ_DESIRED_ACCESS_LE && + access & FILE_WRITE_DESIRE_ACCESS_LE) { + oflags |= O_RDWR; +@@ -3167,7 +3174,9 @@ int smb2_open(struct ksmbd_work *work) + + open_flags = smb2_create_open_flags(file_present, daccess, + req->CreateDisposition, +- &may_flags); ++ &may_flags, ++ req->CreateOptions & FILE_DIRECTORY_FILE_LE || ++ (file_present && S_ISDIR(d_inode(path.dentry)->i_mode))); + + if (!test_tree_conn_flag(tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) { + if (open_flags & (O_CREAT | O_TRUNC)) { +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c +index 1f47ff83a9c22..5d3e595f9da96 100644 +--- a/fs/userfaultfd.c ++++ b/fs/userfaultfd.c +@@ -2050,7 +2050,7 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx, + goto out; + features = uffdio_api.features; + ret = -EINVAL; +- if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) ++ if (uffdio_api.api != UFFD_API) + goto err_out; + ret = -EPERM; + if ((features & UFFD_FEATURE_EVENT_FORK) && !capable(CAP_SYS_PTRACE)) +@@ -2068,6 +2068,11 @@ static int userfaultfd_api(struct userfaultfd_ctx *ctx, + uffdio_api.features &= ~UFFD_FEATURE_WP_HUGETLBFS_SHMEM; + uffdio_api.features &= ~UFFD_FEATURE_WP_UNPOPULATED; + #endif ++ ++ ret = -EINVAL; ++ if (features & ~uffdio_api.features) ++ goto err_out; ++ + uffdio_api.ioctls = UFFD_API_IOCTLS; + ret = -EFAULT; + if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api))) +diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h +index 28566624f008f..f5859b8c68b42 100644 +--- 
a/include/linux/compiler_attributes.h ++++ b/include/linux/compiler_attributes.h +@@ -333,6 +333,18 @@ + */ + #define __section(section) __attribute__((__section__(section))) + ++/* ++ * Optional: only supported since gcc >= 12 ++ * ++ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-uninitialized-variable-attribute ++ * clang: https://clang.llvm.org/docs/AttributeReference.html#uninitialized ++ */ ++#if __has_attribute(__uninitialized__) ++# define __uninitialized __attribute__((__uninitialized__)) ++#else ++# define __uninitialized ++#endif ++ + /* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index e46fbca003099..05092c37a430c 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -1968,8 +1968,9 @@ static inline int subsection_map_index(unsigned long pfn) + static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) + { + int idx = subsection_map_index(pfn); ++ struct mem_section_usage *usage = READ_ONCE(ms->usage); + +- return test_bit(idx, READ_ONCE(ms->usage)->subsection_map); ++ return usage ? test_bit(idx, usage->subsection_map) : 0; + } + #else + static inline int pfn_section_valid(struct mem_section *ms, unsigned long pfn) +diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h +index 5a63b0927cc13..15793a4af9d44 100644 +--- a/include/linux/pagemap.h ++++ b/include/linux/pagemap.h +@@ -335,11 +335,18 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) + * a good order (that's 1MB if you're using 4kB pages) + */ + #ifdef CONFIG_TRANSPARENT_HUGEPAGE +-#define MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER ++#define PREFERRED_MAX_PAGECACHE_ORDER HPAGE_PMD_ORDER + #else +-#define MAX_PAGECACHE_ORDER 8 ++#define PREFERRED_MAX_PAGECACHE_ORDER 8 + #endif + ++/* ++ * xas_split_alloc() does not support arbitrary orders. This implies no ++ * 512MB THP on ARM64 with 64KB base page size. ++ */ ++#define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1) ++#define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER) ++ + /** + * mapping_set_large_folios() - Indicate the file supports large folios. + * @mapping: The file. 
+diff --git a/include/net/tcx.h b/include/net/tcx.h +index 264f147953bae..a0f78fd5cb287 100644 +--- a/include/net/tcx.h ++++ b/include/net/tcx.h +@@ -13,7 +13,7 @@ struct mini_Qdisc; + struct tcx_entry { + struct mini_Qdisc __rcu *miniq; + struct bpf_mprog_bundle bundle; +- bool miniq_active; ++ u32 miniq_active; + struct rcu_head rcu; + }; + +@@ -129,11 +129,16 @@ static inline void tcx_skeys_dec(bool ingress) + tcx_dec(); + } + +-static inline void tcx_miniq_set_active(struct bpf_mprog_entry *entry, +- const bool active) ++static inline void tcx_miniq_inc(struct bpf_mprog_entry *entry) + { + ASSERT_RTNL(); +- tcx_entry(entry)->miniq_active = active; ++ tcx_entry(entry)->miniq_active++; ++} ++ ++static inline void tcx_miniq_dec(struct bpf_mprog_entry *entry) ++{ ++ ASSERT_RTNL(); ++ tcx_entry(entry)->miniq_active--; + } + + static inline bool tcx_entry_is_active(struct bpf_mprog_entry *entry) +diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h +index f33d914d8f469..91583690bddc5 100644 +--- a/include/uapi/misc/fastrpc.h ++++ b/include/uapi/misc/fastrpc.h +@@ -8,11 +8,14 @@ + #define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf) + #define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32) + #define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke) ++/* This ioctl is only supported with secure device nodes */ + #define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4) + #define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create) + #define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap) + #define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap) ++/* This ioctl is only supported with secure device nodes */ + #define FASTRPC_IOCTL_INIT_ATTACH_SNS _IO('R', 8) ++/* This ioctl is only supported with secure device nodes */ + #define FASTRPC_IOCTL_INIT_CREATE_STATIC _IOWR('R', 9, struct fastrpc_init_create_static) + #define FASTRPC_IOCTL_MEM_MAP _IOWR('R', 10, struct fastrpc_mem_map) + #define FASTRPC_IOCTL_MEM_UNMAP _IOWR('R', 11, struct fastrpc_mem_unmap) +diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c +index 146824cc96893..e8d02212da703 100644 +--- a/kernel/bpf/bpf_local_storage.c ++++ b/kernel/bpf/bpf_local_storage.c +@@ -808,8 +808,8 @@ bpf_local_storage_map_alloc(union bpf_attr *attr, + nbuckets = max_t(u32, 2, nbuckets); + smap->bucket_log = ilog2(nbuckets); + +- smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets), +- nbuckets, GFP_USER | __GFP_NOWARN); ++ smap->buckets = bpf_map_kvcalloc(&smap->map, nbuckets, ++ sizeof(*smap->buckets), GFP_USER | __GFP_NOWARN); + if (!smap->buckets) { + err = -ENOMEM; + goto free_smap; +diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c +index 31da67703307b..9ab6be9653059 100644 +--- a/kernel/bpf/helpers.c ++++ b/kernel/bpf/helpers.c +@@ -1078,11 +1078,20 @@ const struct bpf_func_proto bpf_snprintf_proto = { + .arg5_type = ARG_CONST_SIZE_OR_ZERO, + }; + ++struct bpf_async_cb { ++ struct bpf_map *map; ++ struct bpf_prog *prog; ++ void __rcu *callback_fn; ++ void *value; ++ struct rcu_head rcu; ++ u64 flags; ++}; ++ + /* BPF map elements can contain 'struct bpf_timer'. + * Such map owns all of its BPF timers. + * 'struct bpf_timer' is allocated as part of map element allocation + * and it's zero initialized. +- * That space is used to keep 'struct bpf_timer_kern'. ++ * That space is used to keep 'struct bpf_async_kern'. 
+ * bpf_timer_init() allocates 'struct bpf_hrtimer', inits hrtimer, and + * remembers 'struct bpf_map *' pointer it's part of. + * bpf_timer_set_callback() increments prog refcnt and assign bpf callback_fn. +@@ -1095,17 +1104,17 @@ const struct bpf_func_proto bpf_snprintf_proto = { + * freeing the timers when inner map is replaced or deleted by user space. + */ + struct bpf_hrtimer { ++ struct bpf_async_cb cb; + struct hrtimer timer; +- struct bpf_map *map; +- struct bpf_prog *prog; +- void __rcu *callback_fn; +- void *value; +- struct rcu_head rcu; ++ atomic_t cancelling; + }; + + /* the actual struct hidden inside uapi struct bpf_timer */ +-struct bpf_timer_kern { +- struct bpf_hrtimer *timer; ++struct bpf_async_kern { ++ union { ++ struct bpf_async_cb *cb; ++ struct bpf_hrtimer *timer; ++ }; + /* bpf_spin_lock is used here instead of spinlock_t to make + * sure that it always fits into space reserved by struct bpf_timer + * regardless of LOCKDEP and spinlock debug flags. +@@ -1113,19 +1122,23 @@ struct bpf_timer_kern { + struct bpf_spin_lock lock; + } __attribute__((aligned(8))); + ++enum bpf_async_type { ++ BPF_ASYNC_TYPE_TIMER = 0, ++}; ++ + static DEFINE_PER_CPU(struct bpf_hrtimer *, hrtimer_running); + + static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) + { + struct bpf_hrtimer *t = container_of(hrtimer, struct bpf_hrtimer, timer); +- struct bpf_map *map = t->map; +- void *value = t->value; ++ struct bpf_map *map = t->cb.map; ++ void *value = t->cb.value; + bpf_callback_t callback_fn; + void *key; + u32 idx; + + BTF_TYPE_EMIT(struct bpf_timer); +- callback_fn = rcu_dereference_check(t->callback_fn, rcu_read_lock_bh_held()); ++ callback_fn = rcu_dereference_check(t->cb.callback_fn, rcu_read_lock_bh_held()); + if (!callback_fn) + goto out; + +@@ -1154,46 +1167,56 @@ static enum hrtimer_restart bpf_timer_cb(struct hrtimer *hrtimer) + return HRTIMER_NORESTART; + } + +-BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map, +- u64, flags) ++static int __bpf_async_init(struct bpf_async_kern *async, struct bpf_map *map, u64 flags, ++ enum bpf_async_type type) + { +- clockid_t clockid = flags & (MAX_CLOCKS - 1); ++ struct bpf_async_cb *cb; + struct bpf_hrtimer *t; ++ clockid_t clockid; ++ size_t size; + int ret = 0; + +- BUILD_BUG_ON(MAX_CLOCKS != 16); +- BUILD_BUG_ON(sizeof(struct bpf_timer_kern) > sizeof(struct bpf_timer)); +- BUILD_BUG_ON(__alignof__(struct bpf_timer_kern) != __alignof__(struct bpf_timer)); +- + if (in_nmi()) + return -EOPNOTSUPP; + +- if (flags >= MAX_CLOCKS || +- /* similar to timerfd except _ALARM variants are not supported */ +- (clockid != CLOCK_MONOTONIC && +- clockid != CLOCK_REALTIME && +- clockid != CLOCK_BOOTTIME)) ++ switch (type) { ++ case BPF_ASYNC_TYPE_TIMER: ++ size = sizeof(struct bpf_hrtimer); ++ break; ++ default: + return -EINVAL; +- __bpf_spin_lock_irqsave(&timer->lock); +- t = timer->timer; ++ } ++ ++ __bpf_spin_lock_irqsave(&async->lock); ++ t = async->timer; + if (t) { + ret = -EBUSY; + goto out; + } ++ + /* allocate hrtimer via map_kmalloc to use memcg accounting */ +- t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node); +- if (!t) { ++ cb = bpf_map_kmalloc_node(map, size, GFP_ATOMIC, map->numa_node); ++ if (!cb) { + ret = -ENOMEM; + goto out; + } +- t->value = (void *)timer - map->record->timer_off; +- t->map = map; +- t->prog = NULL; +- rcu_assign_pointer(t->callback_fn, NULL); +- hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT); +- t->timer.function = bpf_timer_cb; +- 
WRITE_ONCE(timer->timer, t); +- /* Guarantee the order between timer->timer and map->usercnt. So ++ ++ if (type == BPF_ASYNC_TYPE_TIMER) { ++ clockid = flags & (MAX_CLOCKS - 1); ++ t = (struct bpf_hrtimer *)cb; ++ ++ atomic_set(&t->cancelling, 0); ++ hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT); ++ t->timer.function = bpf_timer_cb; ++ cb->value = (void *)async - map->record->timer_off; ++ } ++ cb->map = map; ++ cb->prog = NULL; ++ cb->flags = flags; ++ rcu_assign_pointer(cb->callback_fn, NULL); ++ ++ WRITE_ONCE(async->cb, cb); ++ /* Guarantee the order between async->cb and map->usercnt. So + * when there are concurrent uref release and bpf timer init, either + * bpf_timer_cancel_and_free() called by uref release reads a no-NULL + * timer or atomic64_read() below returns a zero usercnt. +@@ -1203,15 +1226,34 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map + /* maps with timers must be either held by user space + * or pinned in bpffs. + */ +- WRITE_ONCE(timer->timer, NULL); +- kfree(t); ++ WRITE_ONCE(async->cb, NULL); ++ kfree(cb); + ret = -EPERM; + } + out: +- __bpf_spin_unlock_irqrestore(&timer->lock); ++ __bpf_spin_unlock_irqrestore(&async->lock); + return ret; + } + ++BPF_CALL_3(bpf_timer_init, struct bpf_async_kern *, timer, struct bpf_map *, map, ++ u64, flags) ++{ ++ clock_t clockid = flags & (MAX_CLOCKS - 1); ++ ++ BUILD_BUG_ON(MAX_CLOCKS != 16); ++ BUILD_BUG_ON(sizeof(struct bpf_async_kern) > sizeof(struct bpf_timer)); ++ BUILD_BUG_ON(__alignof__(struct bpf_async_kern) != __alignof__(struct bpf_timer)); ++ ++ if (flags >= MAX_CLOCKS || ++ /* similar to timerfd except _ALARM variants are not supported */ ++ (clockid != CLOCK_MONOTONIC && ++ clockid != CLOCK_REALTIME && ++ clockid != CLOCK_BOOTTIME)) ++ return -EINVAL; ++ ++ return __bpf_async_init(timer, map, flags, BPF_ASYNC_TYPE_TIMER); ++} ++ + static const struct bpf_func_proto bpf_timer_init_proto = { + .func = bpf_timer_init, + .gpl_only = true, +@@ -1221,7 +1263,7 @@ static const struct bpf_func_proto bpf_timer_init_proto = { + .arg3_type = ARG_ANYTHING, + }; + +-BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callback_fn, ++BPF_CALL_3(bpf_timer_set_callback, struct bpf_async_kern *, timer, void *, callback_fn, + struct bpf_prog_aux *, aux) + { + struct bpf_prog *prev, *prog = aux->prog; +@@ -1236,7 +1278,7 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callb + ret = -EINVAL; + goto out; + } +- if (!atomic64_read(&t->map->usercnt)) { ++ if (!atomic64_read(&t->cb.map->usercnt)) { + /* maps with timers must be either held by user space + * or pinned in bpffs. Otherwise timer might still be + * running even when bpf prog is detached and user space +@@ -1245,7 +1287,7 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callb + ret = -EPERM; + goto out; + } +- prev = t->prog; ++ prev = t->cb.prog; + if (prev != prog) { + /* Bump prog refcnt once. Every bpf_timer_set_callback() + * can pick different callback_fn-s within the same prog. 
+@@ -1258,9 +1300,9 @@ BPF_CALL_3(bpf_timer_set_callback, struct bpf_timer_kern *, timer, void *, callb + if (prev) + /* Drop prev prog refcnt when swapping with new prog */ + bpf_prog_put(prev); +- t->prog = prog; ++ t->cb.prog = prog; + } +- rcu_assign_pointer(t->callback_fn, callback_fn); ++ rcu_assign_pointer(t->cb.callback_fn, callback_fn); + out: + __bpf_spin_unlock_irqrestore(&timer->lock); + return ret; +@@ -1274,7 +1316,7 @@ static const struct bpf_func_proto bpf_timer_set_callback_proto = { + .arg2_type = ARG_PTR_TO_FUNC, + }; + +-BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, flags) ++BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, timer, u64, nsecs, u64, flags) + { + struct bpf_hrtimer *t; + int ret = 0; +@@ -1286,7 +1328,7 @@ BPF_CALL_3(bpf_timer_start, struct bpf_timer_kern *, timer, u64, nsecs, u64, fla + return -EINVAL; + __bpf_spin_lock_irqsave(&timer->lock); + t = timer->timer; +- if (!t || !t->prog) { ++ if (!t || !t->cb.prog) { + ret = -EINVAL; + goto out; + } +@@ -1311,20 +1353,21 @@ static const struct bpf_func_proto bpf_timer_start_proto = { + .arg3_type = ARG_ANYTHING, + }; + +-static void drop_prog_refcnt(struct bpf_hrtimer *t) ++static void drop_prog_refcnt(struct bpf_async_cb *async) + { +- struct bpf_prog *prog = t->prog; ++ struct bpf_prog *prog = async->prog; + + if (prog) { + bpf_prog_put(prog); +- t->prog = NULL; +- rcu_assign_pointer(t->callback_fn, NULL); ++ async->prog = NULL; ++ rcu_assign_pointer(async->callback_fn, NULL); + } + } + +-BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer) ++BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, timer) + { +- struct bpf_hrtimer *t; ++ struct bpf_hrtimer *t, *cur_t; ++ bool inc = false; + int ret = 0; + + if (in_nmi()) +@@ -1336,21 +1379,50 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_timer_kern *, timer) + ret = -EINVAL; + goto out; + } +- if (this_cpu_read(hrtimer_running) == t) { ++ ++ cur_t = this_cpu_read(hrtimer_running); ++ if (cur_t == t) { + /* If bpf callback_fn is trying to bpf_timer_cancel() + * its own timer the hrtimer_cancel() will deadlock +- * since it waits for callback_fn to finish ++ * since it waits for callback_fn to finish. ++ */ ++ ret = -EDEADLK; ++ goto out; ++ } ++ ++ /* Only account in-flight cancellations when invoked from a timer ++ * callback, since we want to avoid waiting only if other _callbacks_ ++ * are waiting on us, to avoid introducing lockups. Non-callback paths ++ * are ok, since nobody would synchronously wait for their completion. ++ */ ++ if (!cur_t) ++ goto drop; ++ atomic_inc(&t->cancelling); ++ /* Need full barrier after relaxed atomic_inc */ ++ smp_mb__after_atomic(); ++ inc = true; ++ if (atomic_read(&cur_t->cancelling)) { ++ /* We're cancelling timer t, while some other timer callback is ++ * attempting to cancel us. In such a case, it might be possible ++ * that timer t belongs to the other callback, or some other ++ * callback waiting upon it (creating transitive dependencies ++ * upon us), and we will enter a deadlock if we continue ++ * cancelling and waiting for it synchronously, since it might ++ * do the same. Bail! + */ + ret = -EDEADLK; + goto out; + } +- drop_prog_refcnt(t); ++drop: ++ drop_prog_refcnt(&t->cb); + out: + __bpf_spin_unlock_irqrestore(&timer->lock); + /* Cancel the timer and wait for associated callback to finish + * if it was running. 
+ */ + ret = ret ?: hrtimer_cancel(&t->timer); ++ if (inc) ++ atomic_dec(&t->cancelling); + rcu_read_unlock(); + return ret; + } +@@ -1367,7 +1439,7 @@ static const struct bpf_func_proto bpf_timer_cancel_proto = { + */ + void bpf_timer_cancel_and_free(void *val) + { +- struct bpf_timer_kern *timer = val; ++ struct bpf_async_kern *timer = val; + struct bpf_hrtimer *t; + + /* Performance optimization: read timer->timer without lock first. */ +@@ -1379,7 +1451,7 @@ void bpf_timer_cancel_and_free(void *val) + t = timer->timer; + if (!t) + goto out; +- drop_prog_refcnt(t); ++ drop_prog_refcnt(&t->cb); + /* The subsequent bpf_timer_start/cancel() helpers won't be able to use + * this timer, since it won't be initialized. + */ +@@ -1406,7 +1478,7 @@ void bpf_timer_cancel_and_free(void *val) + */ + if (this_cpu_read(hrtimer_running) != t) + hrtimer_cancel(&t->timer); +- kfree_rcu(t, rcu); ++ kfree_rcu(t, cb.rcu); + } + + BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr) +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index dcb30e304871a..820880960513b 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -722,7 +722,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) + + rq->prev_irq_time += irq_delta; + delta -= irq_delta; +- psi_account_irqtime(rq->curr, irq_delta); + delayacct_irq(rq->curr, irq_delta); + #endif + #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING +@@ -5641,7 +5640,7 @@ void scheduler_tick(void) + { + int cpu = smp_processor_id(); + struct rq *rq = cpu_rq(cpu); +- struct task_struct *curr = rq->curr; ++ struct task_struct *curr; + struct rq_flags rf; + unsigned long thermal_pressure; + u64 resched_latency; +@@ -5653,6 +5652,9 @@ void scheduler_tick(void) + + rq_lock(rq, &rf); + ++ curr = rq->curr; ++ psi_account_irqtime(rq, curr, NULL); ++ + update_rq_clock(rq); + thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); + update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); +@@ -6690,6 +6692,7 @@ static void __sched notrace __schedule(unsigned int sched_mode) + ++*switch_count; + + migrate_disable_switch(rq, prev); ++ psi_account_irqtime(rq, prev, next); + psi_sched_switch(prev, next, !task_on_rq_queued(prev)); + + trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state); +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index 01b3fc97ddc27..d3d0a1c9336be 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -8977,12 +8977,8 @@ static int detach_tasks(struct lb_env *env) + break; + + env->loop++; +- /* +- * We've more or less seen every task there is, call it quits +- * unless we haven't found any movable task yet. 
+- */
+- if (env->loop > env->loop_max &&
+- !(env->flags & LBF_ALL_PINNED))
++ /* We've more or less seen every task there is, call it quits */
++ if (env->loop > env->loop_max)
+ break;
+
+ /* take a breather every nr_migrate tasks */
+@@ -11259,9 +11255,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+
+ if (env.flags & LBF_NEED_BREAK) {
+ env.flags &= ~LBF_NEED_BREAK;
+- /* Stop if we tried all running tasks */
+- if (env.loop < busiest->nr_running)
+- goto more_balance;
++ goto more_balance;
+ }
+
+ /*
+diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
+index 1d0f634725a6e..431971acc7632 100644
+--- a/kernel/sched/psi.c
++++ b/kernel/sched/psi.c
+@@ -784,6 +784,7 @@ static void psi_group_change(struct psi_group *group, int cpu,
+ enum psi_states s;
+ u32 state_mask;
+
++ lockdep_assert_rq_held(cpu_rq(cpu));
+ groupc = per_cpu_ptr(group->pcpu, cpu);
+
+ /*
+@@ -1002,19 +1003,29 @@ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ }
+
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+-void psi_account_irqtime(struct task_struct *task, u32 delta)
++void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev)
+ {
+- int cpu = task_cpu(task);
++ int cpu = task_cpu(curr);
+ struct psi_group *group;
+ struct psi_group_cpu *groupc;
+- u64 now;
++ u64 now, irq;
++ s64 delta;
+
+- if (!task->pid)
++ if (!curr->pid)
++ return;
++
++ lockdep_assert_rq_held(rq);
++ group = task_psi_group(curr);
++ if (prev && task_psi_group(prev) == group)
+ return;
+
+ now = cpu_clock(cpu);
++ irq = irq_time_read(cpu);
++ delta = (s64)(irq - rq->psi_irq_time);
++ if (delta < 0)
++ return;
++ rq->psi_irq_time = irq;
+
+- group = task_psi_group(task);
+ do {
+ if (!group->enabled)
+ continue;
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 35c38daa2d3e1..2e8f26a919edf 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1094,6 +1094,7 @@ struct rq {
+
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ u64 prev_irq_time;
++ u64 psi_irq_time;
+ #endif
+ #ifdef CONFIG_PARAVIRT
+ u64 prev_steal_time;
+diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
+index 38f3698f5e5b3..b02dfc3229510 100644
+--- a/kernel/sched/stats.h
++++ b/kernel/sched/stats.h
+@@ -110,8 +110,12 @@ __schedstats_from_se(struct sched_entity *se)
+ void psi_task_change(struct task_struct *task, int clear, int set);
+ void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ bool sleep);
+-void psi_account_irqtime(struct task_struct *task, u32 delta);
+-
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++void psi_account_irqtime(struct rq *rq, struct task_struct *curr, struct task_struct *prev);
++#else
++static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
++ struct task_struct *prev) {}
++#endif /*CONFIG_IRQ_TIME_ACCOUNTING */
+ /*
+ * PSI tracks state that persists across sleeps, such as iowaits and
+ * memory stalls. As a result, it has to distinguish between sleeps,
+@@ -192,7 +196,8 @@ static inline void psi_ttwu_dequeue(struct task_struct *p) {}
+ static inline void psi_sched_switch(struct task_struct *prev,
+ struct task_struct *next,
+ bool sleep) {}
+-static inline void psi_account_irqtime(struct task_struct *task, u32 delta) {}
++static inline void psi_account_irqtime(struct rq *rq, struct task_struct *curr,
++ struct task_struct *prev) {}
+ #endif /* CONFIG_PSI */
+
+ #ifdef CONFIG_SCHED_INFO
+diff --git a/mm/damon/core.c b/mm/damon/core.c
+index 38e206075143a..ae55f20835b06 100644
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1121,14 +1121,31 @@ static void damon_merge_regions_of(struct damon_target *t, unsigned int thres,
+ * access frequencies are similar. This is for minimizing the monitoring
+ * overhead under the dynamically changeable access pattern. If a merge was
+ * unnecessarily made, later 'kdamond_split_regions()' will revert it.
++ *
++ * The total number of regions could be higher than the user-defined limit,
++ * max_nr_regions for some cases. For example, the user can update
++ * max_nr_regions to a number that lower than the current number of regions
++ * while DAMON is running. For such a case, repeat merging until the limit is
++ * met while increasing @threshold up to possible maximum level.
+ */
+ static void kdamond_merge_regions(struct damon_ctx *c, unsigned int threshold,
+ unsigned long sz_limit)
+ {
+ struct damon_target *t;
+-
+- damon_for_each_target(t, c)
+- damon_merge_regions_of(t, threshold, sz_limit);
++ unsigned int nr_regions;
++ unsigned int max_thres;
++
++ max_thres = c->attrs.aggr_interval /
++ (c->attrs.sample_interval ? c->attrs.sample_interval : 1);
++ do {
++ nr_regions = 0;
++ damon_for_each_target(t, c) {
++ damon_merge_regions_of(t, threshold, sz_limit);
++ nr_regions += damon_nr_regions(t);
++ }
++ threshold = max(1, threshold * 2);
++ } while (nr_regions > c->attrs.max_nr_regions &&
++ threshold / 2 < max_thres);
+ }
+
+ /*
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 3dba1792beba6..8752b794cb843 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -3157,7 +3157,7 @@ static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
+
+ #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /* Use the readahead code, even if readahead is disabled */
+- if (vm_flags & VM_HUGEPAGE) {
++ if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {
+ fpin = maybe_unlock_mmap_for_io(vmf, fpin);
+ ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
+ ra->size = HPAGE_PMD_NR;
+diff --git a/mm/shmem.c b/mm/shmem.c
+index f2023cb7f6f6e..3d721d5591dd7 100644
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -535,8 +535,9 @@ static bool shmem_confirm_swap(struct address_space *mapping,
+
+ static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
+
+-bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+- struct mm_struct *mm, unsigned long vm_flags)
++static bool __shmem_is_huge(struct inode *inode, pgoff_t index,
++ bool shmem_huge_force, struct mm_struct *mm,
++ unsigned long vm_flags)
+ {
+ loff_t i_size;
+
+@@ -567,6 +568,16 @@ bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force,
+ }
+ }
+
++bool shmem_is_huge(struct inode *inode, pgoff_t index,
++ bool shmem_huge_force, struct mm_struct *mm,
++ unsigned long vm_flags)
++{
++ if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
++ return false;
++
++ return __shmem_is_huge(inode, index, shmem_huge_force, mm, vm_flags);
++}
++
+ #if defined(CONFIG_SYSFS)
+ static int shmem_parse_huge(const char *str)
+ {
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 7d146f198bc32..078f6b53f8d50 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1984,7 +1984,15 @@ static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
+ static struct xarray *
+ addr_to_vb_xa(unsigned long addr)
+ {
+- int index = (addr / VMAP_BLOCK_SIZE) % num_possible_cpus();
++ int index = (addr / VMAP_BLOCK_SIZE) % nr_cpu_ids;
++
++ /*
++ * Please note, nr_cpu_ids points on a highest set
++ * possible bit, i.e. we never invoke cpumask_next()
++ * if an index points on it which is nr_cpu_ids - 1.
++ */
++ if (!cpu_possible(index))
++ index = cpumask_next(index, cpu_possible_mask);
+
+ return &per_cpu(vmap_block_queue, index).vmap_blocks;
+ }
+diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
+index faabad6603db2..68f9552931776 100644
+--- a/net/ceph/mon_client.c
++++ b/net/ceph/mon_client.c
+@@ -1085,13 +1085,19 @@ static void delayed_work(struct work_struct *work)
+ struct ceph_mon_client *monc =
+ container_of(work, struct ceph_mon_client, delayed_work.work);
+
+- dout("monc delayed_work\n");
+ mutex_lock(&monc->mutex);
++ dout("%s mon%d\n", __func__, monc->cur_mon);
++ if (monc->cur_mon < 0) {
++ goto out;
++ }
++
+ if (monc->hunting) {
+ dout("%s continuing hunt\n", __func__);
+ reopen_session(monc);
+ } else {
+ int is_auth = ceph_auth_is_authenticated(monc->auth);
++
++ dout("%s is_authed %d\n", __func__, is_auth);
+ if (ceph_con_keepalive_expired(&monc->con,
+ CEPH_MONC_PING_TIMEOUT)) {
+ dout("monc keepalive timeout\n");
+@@ -1116,6 +1122,8 @@ static void delayed_work(struct work_struct *work)
+ }
+ }
+ __schedule_delayed(monc);
++
++out:
+ mutex_unlock(&monc->mutex);
+ }
+
+@@ -1232,13 +1240,15 @@ EXPORT_SYMBOL(ceph_monc_init);
+ void ceph_monc_stop(struct ceph_mon_client *monc)
+ {
+ dout("stop\n");
+- cancel_delayed_work_sync(&monc->delayed_work);
+
+ mutex_lock(&monc->mutex);
+ __close_session(monc);
++ monc->hunting = false;
+ monc->cur_mon = -1;
+ mutex_unlock(&monc->mutex);
+
++ cancel_delayed_work_sync(&monc->delayed_work);
++
+ /*
+ * flush msgr queue before we destroy ourselves to ensure that:
+ * - any work that references our embedded con is finished.
+diff --git a/net/core/datagram.c b/net/core/datagram.c
+index e49aaf2d48b80..ef4e9e423d393 100644
+--- a/net/core/datagram.c
++++ b/net/core/datagram.c
+@@ -441,11 +441,12 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
+ if (copy > len)
+ copy = len;
+
++ n = 0;
+ skb_frag_foreach_page(frag,
+ skb_frag_off(frag) + offset - start,
+ copy, p, p_off, p_len, copied) {
+ vaddr = kmap_local_page(p);
+- n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
++ n += INDIRECT_CALL_1(cb, simple_copy_to_iter,
+ vaddr + p_off, p_len, data, to);
+ kunmap_local(vaddr);
+ }
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index fd20aae30be23..bbf40b9997138 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -434,7 +434,8 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
+ page = sg_page(sge);
+ if (copied + copy > len)
+ copy = len - copied;
+- copy = copy_page_to_iter(page, sge->offset, copy, iter);
++ if (copy)
++ copy = copy_page_to_iter(page, sge->offset, copy, iter);
+ if (!copy) {
+ copied = copied ? copied : -EFAULT;
+ goto out;
+diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
+index b2de2108b356a..34d76e87847d0 100644
+--- a/net/ethtool/linkstate.c
++++ b/net/ethtool/linkstate.c
+@@ -37,6 +37,8 @@ static int linkstate_get_sqi(struct net_device *dev)
+ mutex_lock(&phydev->lock);
+ if (!phydev->drv || !phydev->drv->get_sqi)
+ ret = -EOPNOTSUPP;
++ else if (!phydev->link)
++ ret = -ENETDOWN;
+ else
+ ret = phydev->drv->get_sqi(phydev);
+ mutex_unlock(&phydev->lock);
+@@ -55,6 +57,8 @@ static int linkstate_get_sqi_max(struct net_device *dev)
+ mutex_lock(&phydev->lock);
+ if (!phydev->drv || !phydev->drv->get_sqi_max)
+ ret = -EOPNOTSUPP;
++ else if (!phydev->link)
++ ret = -ENETDOWN;
+ else
+ ret = phydev->drv->get_sqi_max(phydev);
+ mutex_unlock(&phydev->lock);
+@@ -62,6 +66,17 @@ static int linkstate_get_sqi_max(struct net_device *dev)
+ return ret;
+ };
+
++static bool linkstate_sqi_critical_error(int sqi)
++{
++ return sqi < 0 && sqi != -EOPNOTSUPP && sqi != -ENETDOWN;
++}
++
++static bool linkstate_sqi_valid(struct linkstate_reply_data *data)
++{
++ return data->sqi >= 0 && data->sqi_max >= 0 &&
++ data->sqi <= data->sqi_max;
++}
++
+ static int linkstate_get_link_ext_state(struct net_device *dev,
+ struct linkstate_reply_data *data)
+ {
+@@ -93,12 +108,12 @@ static int linkstate_prepare_data(const struct ethnl_req_info *req_base,
+ data->link = __ethtool_get_link(dev);
+
+ ret = linkstate_get_sqi(dev);
+- if (ret < 0 && ret != -EOPNOTSUPP)
++ if (linkstate_sqi_critical_error(ret))
+ goto out;
+ data->sqi = ret;
+
+ ret = linkstate_get_sqi_max(dev);
+- if (ret < 0 && ret != -EOPNOTSUPP)
++ if (linkstate_sqi_critical_error(ret))
+ goto out;
+ data->sqi_max = ret;
+
+@@ -136,11 +151,10 @@ static int linkstate_reply_size(const struct ethnl_req_info *req_base,
+ len = nla_total_size(sizeof(u8)) /* LINKSTATE_LINK */
+ + 0;
+
+- if (data->sqi != -EOPNOTSUPP)
+- len += nla_total_size(sizeof(u32));
+-
+- if (data->sqi_max != -EOPNOTSUPP)
+- len += nla_total_size(sizeof(u32));
++ if (linkstate_sqi_valid(data)) {
++ len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI */
++ len += nla_total_size(sizeof(u32)); /* LINKSTATE_SQI_MAX */
++ }
+
+ if (data->link_ext_state_provided)
+ len += nla_total_size(sizeof(u8)); /* LINKSTATE_EXT_STATE */
+@@ -164,13 +178,14 @@ static int linkstate_fill_reply(struct sk_buff *skb,
+ nla_put_u8(skb, ETHTOOL_A_LINKSTATE_LINK, !!data->link))
+ return -EMSGSIZE;
+
+- if (data->sqi != -EOPNOTSUPP &&
+- nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
+- return -EMSGSIZE;
++ if (linkstate_sqi_valid(data)) {
++ if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI, data->sqi))
++ return -EMSGSIZE;
+
+- if (data->sqi_max != -EOPNOTSUPP &&
+- nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX, data->sqi_max))
+- return -EMSGSIZE;
++ if (nla_put_u32(skb, ETHTOOL_A_LINKSTATE_SQI_MAX,
++ data->sqi_max))
++ return -EMSGSIZE;
++ }
+
+ if (data->link_ext_state_provided) {
+ if (nla_put_u8(skb, ETHTOOL_A_LINKSTATE_EXT_STATE,
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 2621c262f182a..b9133c0972d38 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2106,8 +2106,16 @@ void tcp_clear_retrans(struct tcp_sock *tp)
+ static inline void tcp_init_undo(struct tcp_sock *tp)
+ {
+ tp->undo_marker = tp->snd_una;
++
+ /* Retransmission still in flight may cause DSACKs later. */
+- tp->undo_retrans = tp->retrans_out ? : -1;
++ /* First, account for regular retransmits in flight: */
++ tp->undo_retrans = tp->retrans_out;
++ /* Next, account for TLP retransmits in flight: */
++ if (tp->tlp_high_seq && tp->tlp_retrans)
++ tp->undo_retrans++;
++ /* Finally, avoid 0, because undo_retrans==0 means "can undo now": */
++ if (!tp->undo_retrans)
++ tp->undo_retrans = -1;
+ }
+
+ static bool tcp_is_rack(const struct sock *sk)
+@@ -2186,6 +2194,7 @@ void tcp_enter_loss(struct sock *sk)
+
+ tcp_set_ca_state(sk, TCP_CA_Loss);
+ tp->high_seq = tp->snd_nxt;
++ tp->tlp_high_seq = 0;
+ tcp_ecn_queue_cwr(tp);
+
+ /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 984ab4a0421ed..87ebe958a642f 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -457,17 +457,34 @@ static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
+ static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
+ const struct sk_buff *skb)
+ {
++ const struct inet_connection_sock *icsk = inet_csk(sk);
++ u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
+ const struct tcp_sock *tp = tcp_sk(sk);
+- const int timeout = TCP_RTO_MAX * 2;
+- u32 rcv_delta, rtx_delta;
+-
+- rcv_delta = inet_csk(sk)->icsk_timeout - tp->rcv_tstamp;
+- if (rcv_delta <= timeout)
+- return false;
++ int timeout = TCP_RTO_MAX * 2;
++ u32 rtx_delta;
++ s32 rcv_delta;
+
+ rtx_delta = (u32)msecs_to_jiffies(tcp_time_stamp(tp) -
+ (tp->retrans_stamp ?: tcp_skb_timestamp(skb)));
+
++ if (user_timeout) {
++ /* If user application specified a TCP_USER_TIMEOUT,
++ * it does not want win 0 packets to 'reset the timer'
++ * while retransmits are not making progress.
++ */
++ if (rtx_delta > user_timeout)
++ return true;
++ timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
++ }
++
++ /* Note: timer interrupt might have been delayed by at least one jiffy,
++ * and tp->rcv_tstamp might very well have been written recently.
++ * rcv_delta can thus be negative.
++ */
++ rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp;
++ if (rcv_delta <= timeout)
++ return false;
++
+ return rtx_delta > timeout;
+ }
+
+@@ -509,8 +526,6 @@ void tcp_retransmit_timer(struct sock *sk)
+ if (WARN_ON_ONCE(!skb))
+ return;
+
+- tp->tlp_high_seq = 0;
+-
+ if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
+ !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
+ /* Receiver dastardly shrinks window. Our retransmits
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 16ca211c8619d..73fb814460b6b 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -326,6 +326,8 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ goto fail_unlock;
+ }
+
++ sock_set_flag(sk, SOCK_RCU_FREE);
++
+ sk_add_node_rcu(sk, &hslot->head);
+ hslot->count++;
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+@@ -342,7 +344,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
+ hslot2->count++;
+ spin_unlock(&hslot2->lock);
+ }
+- sock_set_flag(sk, SOCK_RCU_FREE);
++
+ error = 0;
+ fail_unlock:
+ spin_unlock_bh(&hslot->lock);
+diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
+index 1bd86868726bf..3ac19516ed803 100644
+--- a/net/sched/act_ct.c
++++ b/net/sched/act_ct.c
+@@ -1073,6 +1073,14 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
+ */
+ if (nf_conntrack_confirm(skb) != NF_ACCEPT)
+ goto drop;
++
++ /* The ct may be dropped if a clash has been resolved,
++ * so it's necessary to retrieve it from skb again to
++ * prevent UAF.
++ */
++ ct = nf_ct_get(skb, &ctinfo);
++ if (!ct)
++ skip_add = true;
+ }
+
+ if (!skip_add)
+diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
+index a463a63192c3c..8dde3548dc11c 100644
+--- a/net/sched/sch_ingress.c
++++ b/net/sched/sch_ingress.c
+@@ -91,7 +91,7 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
+ entry = tcx_entry_fetch_or_create(dev, true, &created);
+ if (!entry)
+ return -ENOMEM;
+- tcx_miniq_set_active(entry, true);
++ tcx_miniq_inc(entry);
+ mini_qdisc_pair_init(&q->miniqp, sch, &tcx_entry(entry)->miniq);
+ if (created)
+ tcx_entry_update(dev, entry, true);
+@@ -121,7 +121,7 @@ static void ingress_destroy(struct Qdisc *sch)
+ tcf_block_put_ext(q->block, sch, &q->block_info);
+
+ if (entry) {
+- tcx_miniq_set_active(entry, false);
++ tcx_miniq_dec(entry);
+ if (!tcx_entry_is_active(entry)) {
+ tcx_entry_update(dev, NULL, true);
+ tcx_entry_free(entry);
+@@ -256,7 +256,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+ entry = tcx_entry_fetch_or_create(dev, true, &created);
+ if (!entry)
+ return -ENOMEM;
+- tcx_miniq_set_active(entry, true);
++ tcx_miniq_inc(entry);
+ mini_qdisc_pair_init(&q->miniqp_ingress, sch, &tcx_entry(entry)->miniq);
+ if (created)
+ tcx_entry_update(dev, entry, true);
+@@ -275,7 +275,7 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
+ entry = tcx_entry_fetch_or_create(dev, false, &created);
+ if (!entry)
+ return -ENOMEM;
+- tcx_miniq_set_active(entry, true);
++ tcx_miniq_inc(entry);
+ mini_qdisc_pair_init(&q->miniqp_egress, sch, &tcx_entry(entry)->miniq);
+ if (created)
+ tcx_entry_update(dev, entry, false);
+@@ -301,7 +301,7 @@ static void clsact_destroy(struct Qdisc *sch)
+ tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
+
+ if (ingress_entry) {
+- tcx_miniq_set_active(ingress_entry, false);
++ tcx_miniq_dec(ingress_entry);
+ if (!tcx_entry_is_active(ingress_entry)) {
+ tcx_entry_update(dev, NULL, true);
+ tcx_entry_free(ingress_entry);
+@@ -309,7 +309,7 @@ static void clsact_destroy(struct Qdisc *sch)
+ }
+
+ if (egress_entry) {
+- tcx_miniq_set_active(egress_entry, false);
++ tcx_miniq_dec(egress_entry);
+ if (!tcx_entry_is_active(egress_entry)) {
+ tcx_entry_update(dev, NULL, false);
+ tcx_entry_free(egress_entry);
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index c3007f3e16f8c..c1fe2a6ea7976 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -2422,6 +2422,13 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ transport->srcport = 0;
+ status = -EAGAIN;
+ break;
++ case -EPERM:
++ /* Happens, for instance, if a BPF program is preventing
++ * the connect. Remap the error so upper layers can better
++ * deal with it.
++ */
++ status = -ECONNREFUSED;
++ fallthrough;
+ case -EINVAL:
+ /* Happens, for instance, if the user specified a link
+ * local IPv6 address without a scope-id.
+diff --git a/scripts/ld-version.sh b/scripts/ld-version.sh
+index a78b804b680cf..b9513d224476f 100755
+--- a/scripts/ld-version.sh
++++ b/scripts/ld-version.sh
+@@ -57,9 +57,11 @@ else
+ fi
+ fi
+
+-# Some distributions append a package release number, as in 2.34-4.fc32
+-# Trim the hyphen and any characters that follow.
+-version=${version%-*}
++# There may be something after the version, such as a distribution's package
++# release number (like Fedora's "2.34-4.fc32") or punctuation (like LLD briefly
++# added before the "compatible with GNU linkers" string), so remove everything
++# after just numbers and periods.
++version=${version%%[!0-9.]*}
+
+ cversion=$(get_canonical_version $version)
+ min_cversion=$(get_canonical_version $min_version)
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index fed3f59d7bd69..7743cb31b6e06 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9817,6 +9817,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
++ SND_PCI_QUIRK(0x103c, 0x84a6, "HP 250 G7 Notebook PC", ALC269_FIXUP_HP_LINE1_MIC1_LED),
+ SND_PCI_QUIRK(0x103c, 0x84ae, "HP 15-db0403ng", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
+ SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN),
+ SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+@@ -10086,6 +10087,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
+ SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+ SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
++ SND_PCI_QUIRK(0x10ec, 0x11bc, "VAIO VJFE-IL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+ SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+@@ -10182,6 +10184,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1558, 0xa600, "Clevo NL50NU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa650, "Clevo NP[567]0SN[CD]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xa671, "Clevo NP70SN[CDE]", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x1558, 0xa763, "Clevo V54x_6x_TU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb018, "Clevo NP50D[BE]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb019, "Clevo NH77D[BE]Q", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1558, 0xb022, "Clevo NH77D[DC][QW]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+@@ -10338,6 +10341,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
++ SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO),
+ SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME),
+ SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index f3cefd8660812..19ec1a45737ea 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -534,12 +534,6 @@ static int hda_dai_suspend(struct hdac_bus *bus)
+ sdai = swidget->private;
+ ops = sdai->platform_private;
+
+- ret = hda_link_dma_cleanup(hext_stream->link_substream,
+- hext_stream,
+- cpu_dai);
+- if (ret < 0)
+- return ret;
+-
+ /* for consistency with TRIGGER_SUSPEND */
+ if (ops->post_trigger) {
+ ret = ops->post_trigger(sdev, cpu_dai,
+@@ -548,6 +542,12 @@ static int hda_dai_suspend(struct hdac_bus *bus)
+ if (ret < 0)
+ return ret;
+ }
++
++ ret = hda_link_dma_cleanup(hext_stream->link_substream,
++ hext_stream,
++ cpu_dai);
++ if (ret < 0)
++ return ret;
+ }
+ }
+
+diff --git a/tools/testing/selftests/net/gro.c b/tools/testing/selftests/net/gro.c
+index b204df4f33322..30024d0ed3739 100644
+--- a/tools/testing/selftests/net/gro.c
++++ b/tools/testing/selftests/net/gro.c
+@@ -113,9 +113,6 @@ static void setup_sock_filter(int fd)
+ next_off = offsetof(struct ipv6hdr, nexthdr);
+ ipproto_off = ETH_HLEN + next_off;
+
+- /* Overridden later if exthdrs are used: */
+- opt_ipproto_off = ipproto_off;
+-
+ if (strcmp(testname, "ip") == 0) {
+ if (proto == PF_INET)
+ optlen = sizeof(struct ip_timestamp);
+diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile
+index e95bd56b332f7..35856b11c1435 100644
+--- a/tools/testing/selftests/wireguard/qemu/Makefile
++++ b/tools/testing/selftests/wireguard/qemu/Makefile
+@@ -109,9 +109,9 @@ KERNEL_ARCH := x86_64
+ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+ QEMU_VPORT_RESULT := virtio-serial-device
+ ifeq ($(HOST_ARCH),$(ARCH))
+-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
++QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
+ else
+-QEMU_MACHINE := -cpu max -machine microvm -no-acpi
++QEMU_MACHINE := -cpu max -machine microvm,acpi=off
+ endif
+ else ifeq ($(ARCH),i686)
+ CHOST := i686-linux-musl
+@@ -120,9 +120,9 @@ KERNEL_ARCH := x86
+ KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage
+ QEMU_VPORT_RESULT := virtio-serial-device
+ ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH))
+-QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off -no-acpi
++QEMU_MACHINE := -cpu host -machine microvm,accel=kvm,pit=off,pic=off,rtc=off,acpi=off
+ else
+-QEMU_MACHINE := -cpu coreduo -machine microvm -no-acpi
++QEMU_MACHINE := -cpu coreduo -machine microvm,acpi=off
+ endif
+ else ifeq ($(ARCH),mips64)
+CHOST := mips64-linux-musl