diff --git a/CREDITS b/CREDITS
index 903ea238e64f..fa5397f4ebcd 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2036,6 +2036,10 @@ S: Botanicka' 68a
 S: 602 00 Brno
 S: Czech Republic
 
+N: Karsten Keil
+E: isdn@linux-pingi.de
+D: ISDN subsystem maintainer
+
 N: Jakob Kemi
 E: jakob.kemi@telia.com
 D: V4L W9966 Webcam driver
diff --git a/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
index 5caa3779660d..5491d0775ede 100644
--- a/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
+++ b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
@@ -180,9 +180,9 @@ allOf:
     then:
       properties:
         reg:
-          minItems: 2
+          maxItems: 2
         reg-names:
-          minItems: 2
+          maxItems: 2
     else:
       properties:
         reg:
diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
index 6a47e08e0e97..f9cffbb2df07 100644
--- a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.yaml
@@ -142,7 +142,9 @@ allOf:
       required:
         - orientation-switch
     then:
-      $ref: /schemas/usb/usb-switch.yaml#
+      allOf:
+        - $ref: /schemas/usb/usb-switch.yaml#
+        - $ref: /schemas/usb/usb-switch-ports.yaml#
 
 unevaluatedProperties: false
diff --git a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
index e906403208c0..ea1135c91fb7 100644
--- a/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/samsung,usb3-drd-phy.yaml
@@ -125,7 +125,9 @@ allOf:
         contains:
           const: google,gs101-usb31drd-phy
     then:
-      $ref: /schemas/usb/usb-switch.yaml#
+      allOf:
+        - $ref: /schemas/usb/usb-switch.yaml#
+        - $ref: /schemas/usb/usb-switch-ports.yaml#
 
       properties:
         clocks:
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index e925cd4c3ac8..72483bc3274d 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -197,6 +197,7 @@ allOf:
               - renesas,rcar-gen2-scif
               - renesas,rcar-gen3-scif
               - renesas,rcar-gen4-scif
+              - renesas,rcar-gen5-scif
     then:
       properties:
         interrupts:
diff --git a/Documentation/devicetree/bindings/spi/spi-cadence.yaml b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
index 8de96abe9da1..27414b78d61d 100644
--- a/Documentation/devicetree/bindings/spi/spi-cadence.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-cadence.yaml
@@ -14,9 +14,14 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - cdns,spi-r1p6
-      - xlnx,zynq-spi-r1p6
+    oneOf:
+      - enum:
+          - xlnx,zynq-spi-r1p6
+      - items:
+          - enum:
+              - xlnx,zynqmp-spi-r1p6
+              - xlnx,versal-net-spi-r1p6
+          - const: cdns,spi-r1p6
 
   reg:
     maxItems: 1
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
index 748faf7f7081..ce6762c92fda 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml
@@ -34,6 +34,7 @@ properties:
           - rockchip,rk3328-spi
           - rockchip,rk3368-spi
           - rockchip,rk3399-spi
+          - rockchip,rk3506-spi
          - rockchip,rk3528-spi
          - rockchip,rk3562-spi
          - rockchip,rk3568-spi
diff --git a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
index e3a7df91f7f1..89b1fb90aeeb 100644
--- a/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
+++ b/Documentation/devicetree/bindings/usb/fcs,fsa4480.yaml
@@ -76,6 +76,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
index baf130669c38..73e7a60a0060 100644
--- a/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/fsl,imx8mp-dwc3.yaml
@@ -89,13 +89,21 @@ required:
   - reg
   - "#address-cells"
   - "#size-cells"
-  - dma-ranges
   - ranges
   - clocks
   - clock-names
   - interrupts
   - power-domains
 
+allOf:
+  - if:
+      properties:
+        compatible:
+          const: fsl,imx8mp-dwc3
+    then:
+      required:
+        - dma-ranges
+
 additionalProperties: false
 
 examples:
diff --git a/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml b/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
index e588514fab2d..793662f6f3bf 100644
--- a/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
+++ b/Documentation/devicetree/bindings/usb/gpio-sbu-mux.yaml
@@ -52,6 +52,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
   - if:
       required:
         - mode-switch
diff --git a/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml b/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
index d805dde80796..4d2fcaa71870 100644
--- a/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
+++ b/Documentation/devicetree/bindings/usb/nxp,ptn36502.yaml
@@ -46,6 +46,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml b/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
index 589914d22bf2..25fab5fdc2cd 100644
--- a/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
+++ b/Documentation/devicetree/bindings/usb/onnn,nb7vpq904m.yaml
@@ -91,6 +91,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/parade,ps8830.yaml b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
index aeb33667818e..eaeab1c01a59 100644
--- a/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
+++ b/Documentation/devicetree/bindings/usb/parade,ps8830.yaml
@@ -81,6 +81,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
index dfd084ed9024..d49a58d5478f 100644
--- a/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,snps-dwc3.yaml
@@ -68,6 +68,7 @@ properties:
           - qcom,sm8550-dwc3
           - qcom,sm8650-dwc3
           - qcom,x1e80100-dwc3
+          - qcom,x1e80100-dwc3-mp
       - const: qcom,snps-dwc3
 
   reg:
@@ -460,8 +461,10 @@ allOf:
     then:
       properties:
         interrupts:
+          minItems: 4
           maxItems: 5
         interrupt-names:
+          minItems: 4
           items:
             - const: dwc_usb3
             - const: pwr_event
diff --git a/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml b/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml
index 96346723f3e9..96dcec9b7620 100644
--- a/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,wcd939x-usbss.yaml
@@ -60,6 +60,7 @@ required:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 additionalProperties: false
diff --git a/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml b/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml
index f713cac4a8ac..e1501ea6b50b 100644
--- a/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml
+++ b/Documentation/devicetree/bindings/usb/ti,tusb1046.yaml
@@ -11,6 +11,7 @@ maintainers:
 
 allOf:
   - $ref: usb-switch.yaml#
+  - $ref: usb-switch-ports.yaml#
 
 properties:
   compatible:
diff --git a/Documentation/devicetree/bindings/usb/usb-switch-ports.yaml b/Documentation/devicetree/bindings/usb/usb-switch-ports.yaml
new file mode 100644
index 000000000000..6bf0c97e30ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb-switch-ports.yaml
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/usb-switch-ports.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: USB Orientation and Mode Switches Ports Graph Properties
+
+maintainers:
+  - Greg Kroah-Hartman
+
+description:
+  Ports Graph properties for devices handling USB mode and orientation switching.
+
+properties:
+  port:
+    $ref: /schemas/graph.yaml#/$defs/port-base
+    description:
+      A port node to link the device to a TypeC controller for the purpose of
+      handling altmode muxing and orientation switching.
+
+    properties:
+      endpoint:
+        $ref: /schemas/graph.yaml#/$defs/endpoint-base
+        unevaluatedProperties: false
+        properties:
+          data-lanes:
+            $ref: /schemas/types.yaml#/definitions/uint32-array
+            minItems: 1
+            maxItems: 8
+            uniqueItems: true
+            items:
+              maximum: 8
+
+  ports:
+    $ref: /schemas/graph.yaml#/properties/ports
+    properties:
+      port@0:
+        $ref: /schemas/graph.yaml#/properties/port
+        description:
+          Super Speed (SS) Output endpoint to the Type-C connector
+
+      port@1:
+        $ref: /schemas/graph.yaml#/$defs/port-base
+        description:
+          Super Speed (SS) Input endpoint from the Super-Speed PHY
+        unevaluatedProperties: false
+
+        properties:
+          endpoint:
+            $ref: /schemas/graph.yaml#/$defs/endpoint-base
+            unevaluatedProperties: false
+            properties:
+              data-lanes:
+                $ref: /schemas/types.yaml#/definitions/uint32-array
+                minItems: 1
+                maxItems: 8
+                uniqueItems: true
+                items:
+                  maximum: 8
+
+oneOf:
+  - required:
+      - port
+  - required:
+      - ports
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/usb/usb-switch.yaml b/Documentation/devicetree/bindings/usb/usb-switch.yaml
index 896201912630..f77731493dc4 100644
--- a/Documentation/devicetree/bindings/usb/usb-switch.yaml
+++ b/Documentation/devicetree/bindings/usb/usb-switch.yaml
@@ -25,56 +25,4 @@ properties:
     description: Possible handler of SuperSpeed signals retiming
     type: boolean
 
-  port:
-    $ref: /schemas/graph.yaml#/$defs/port-base
-    description:
-      A port node to link the device to a TypeC controller for the purpose of
-      handling altmode muxing and orientation switching.
-
-    properties:
-      endpoint:
-        $ref: /schemas/graph.yaml#/$defs/endpoint-base
-        unevaluatedProperties: false
-        properties:
-          data-lanes:
-            $ref: /schemas/types.yaml#/definitions/uint32-array
-            minItems: 1
-            maxItems: 8
-            uniqueItems: true
-            items:
-              maximum: 8
-
-  ports:
-    $ref: /schemas/graph.yaml#/properties/ports
-    properties:
-      port@0:
-        $ref: /schemas/graph.yaml#/properties/port
-        description:
-          Super Speed (SS) Output endpoint to the Type-C connector
-
-      port@1:
-        $ref: /schemas/graph.yaml#/$defs/port-base
-        description:
-          Super Speed (SS) Input endpoint from the Super-Speed PHY
-        unevaluatedProperties: false
-
-        properties:
-          endpoint:
-            $ref: /schemas/graph.yaml#/$defs/endpoint-base
-            unevaluatedProperties: false
-            properties:
-              data-lanes:
-                $ref: /schemas/types.yaml#/definitions/uint32-array
-                minItems: 1
-                maxItems: 8
-                uniqueItems: true
-                items:
-                  maximum: 8
-
-oneOf:
-  - required:
-      - port
-  - required:
-      - ports
-
 additionalProperties: true
diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml
index cafb4ec20447..80728f6f9bc8 100644
--- a/Documentation/netlink/specs/dpll.yaml
+++ b/Documentation/netlink/specs/dpll.yaml
@@ -605,6 +605,8 @@ operations:
       reply: &pin-attrs
         attributes:
           - id
+          - module-name
+          - clock-id
           - board-label
           - panel-label
           - package-label
diff --git a/Documentation/networking/netconsole.rst b/Documentation/networking/netconsole.rst
index 59cb9982afe6..2555e75e5cc1 100644
--- a/Documentation/networking/netconsole.rst
+++ b/Documentation/networking/netconsole.rst
@@ -19,9 +19,6 @@ Userdata append support by Matthew Wood, Jan 22 2024
 
 Sysdata append support by Breno Leitao, Jan 15 2025
 
-Please send bug reports to Matt Mackall,
-Satyam Sharma, and Cong Wang
-
 Introduction:
 =============
diff --git a/MAINTAINERS b/MAINTAINERS
index d652f4f27756..1ab7e8746299 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1997,6 +1997,10 @@ F: include/uapi/linux/if_arcnet.h
 
 ARM AND ARM64 SoC SUB-ARCHITECTURES (COMMON PARTS)
 M: Arnd Bergmann
+M: Krzysztof Kozlowski
+M: Alexandre Belloni
+M: Linus Walleij
+R: Drew Fustini
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L: soc@lists.linux.dev
 S: Maintained
@@ -13112,6 +13116,15 @@ F: include/uapi/linux/io_uring.h
 F: include/uapi/linux/io_uring/
 F: io_uring/
 
+IO_URING ZCRX
+M: Pavel Begunkov
+L: io-uring@vger.kernel.org
+L: netdev@vger.kernel.org
+T: git https://github.com/isilence/linux.git zcrx/for-next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux.git
+S: Maintained
+F: io_uring/zcrx.*
+
 IPMI SUBSYSTEM
 M: Corey Minyard
 L: openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -13247,10 +13260,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
 F: drivers/infiniband/ulp/isert
 
 ISDN/CMTP OVER BLUETOOTH
-M: Karsten Keil
-L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
 L: netdev@vger.kernel.org
-S: Odd Fixes
+S: Orphan
 W: http://www.isdn4linux.de
 F: Documentation/isdn/
 F: drivers/isdn/capi/
@@ -13259,10 +13270,8 @@ F: include/uapi/linux/isdn/
 F: net/bluetooth/cmtp/
 
 ISDN/mISDN SUBSYSTEM
-M: Karsten Keil
-L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
 L: netdev@vger.kernel.org
-S: Maintained
+S: Orphan
 W: http://www.isdn4linux.de
 F: drivers/isdn/Kconfig
 F: drivers/isdn/Makefile
@@ -14395,6 +14404,7 @@ F: tools/memory-model/
 
 LINUX-NEXT TREE
 M: Stephen Rothwell
+M: Mark Brown
 L: linux-next@vger.kernel.org
 S: Supported
 B: mailto:linux-next@vger.kernel.org and the appropriate development tree
@@ -21326,6 +21336,7 @@ F: drivers/media/platform/qcom/venus/
 
 QUALCOMM WCN36XX WIRELESS DRIVER
 M: Loic Poulain
 L: wcn36xx@lists.infradead.org
+L: linux-wireless@vger.kernel.org
 S: Supported
 W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
 F: drivers/net/wireless/ath/wcn36xx/
diff --git a/Makefile b/Makefile
index d14824792227..b34a1f4c0396 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 18
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
index c78ed064d166..1eb6406449d1 100644
--- a/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2711-rpi.dtsi
@@ -77,6 +77,14 @@ &i2c0 {
     /delete-property/ pinctrl-0;
 };
 
+&pm {
+    clocks = <&firmware_clocks 5>,
+         <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+         <&clocks BCM2835_CLOCK_H264>,
+         <&clocks BCM2835_CLOCK_ISP>;
+    clock-names = "v3d", "peri_image", "h264", "isp";
+};
+
 &rmem {
     /*
      * RPi4's co-processor will copy the board's bootloader configuration
diff --git a/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi b/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
index 8b3c21d9f333..fa9d784c88b6 100644
--- a/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
+++ b/arch/arm/boot/dts/broadcom/bcm2835-rpi-common.dtsi
@@ -13,7 +13,16 @@ &hdmi {
     clock-names = "pixel", "hdmi";
 };
 
+&pm {
+    clocks = <&firmware_clocks 5>,
+         <&clocks BCM2835_CLOCK_PERI_IMAGE>,
+         <&clocks BCM2835_CLOCK_H264>,
+         <&clocks BCM2835_CLOCK_ISP>;
+    clock-names = "v3d", "peri_image", "h264", "isp";
+};
+
 &v3d {
+    clocks = <&firmware_clocks 5>;
     power-domains = <&power RPI_POWER_DOMAIN_V3D>;
 };
diff --git a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
index e77a66adc22a..205b87f557d6 100644
--- a/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
+++ b/arch/arm64/boot/dts/broadcom/bcm2712.dtsi
@@ -326,6 +326,8 @@ gicv2: interrupt-controller@7fff9000 {
                   <0x7fffe000 0x2000>;
             interrupt-controller;
             #address-cells = <0>;
+            interrupts = ;
             #interrupt-cells = <3>;
         };
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index aa89c2e67ebc..0944e296dd4a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -293,7 +293,8 @@ static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
 static inline pte_t pte_mkwrite_novma(pte_t pte)
 {
     pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
-    pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
+    if (pte_sw_dirty(pte))
+        pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
     return pte;
 }
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index a86c897017df..cd5912ba617b 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -35,7 +35,7 @@ void copy_highpage(struct page *to, struct page *from)
             from != folio_page(src, 0))
             return;
 
-        WARN_ON_ONCE(!folio_try_hugetlb_mte_tagging(dst));
+        folio_try_hugetlb_mte_tagging(dst);
 
         /*
          * Populate tags for all subpages.
@@ -51,8 +51,13 @@ void copy_highpage(struct page *to, struct page *from)
         }
         folio_set_hugetlb_mte_tagged(dst);
     } else if (page_mte_tagged(from)) {
-        /* It's a new page, shouldn't have been tagged yet */
-        WARN_ON_ONCE(!try_page_mte_tagging(to));
+        /*
+         * Most of the time it's a new page that shouldn't have been
+         * tagged yet. However, folio migration can end up reusing the
+         * same page without untagging it. Ignore the warning if the
+         * page is already tagged.
+         */
+        try_page_mte_tagging(to);
 
         mte_copy_page_tags(kto, kfrom);
         set_page_mte_tagged(to);
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 3a2836e9d856..816570514c37 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -47,7 +47,7 @@ static struct resource standard_io_resources[] = {
         .name = "keyboard",
         .start = 0x60,
         .end = 0x6f,
-        .flags = IORESOURCE_IO | IORESOURCE_BUSY
+        .flags = IORESOURCE_IO
     },
     {
         .name = "dma page reg",
@@ -213,7 +213,7 @@ void __init plat_mem_setup(void)
 
     /* Request I/O space for devices used on the Malta board. */
     for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
-        request_resource(&ioport_resource, standard_io_resources+i);
+        insert_resource(&ioport_resource, standard_io_resources + i);
 
     /*
      * Enable DMA channel 4 (cascade channel) in the PIIX4 south bridge.
diff --git a/arch/mips/pci/pci-malta.c b/arch/mips/pci/pci-malta.c
index 6aefdf20ca05..2e35aeba45bc 100644
--- a/arch/mips/pci/pci-malta.c
+++ b/arch/mips/pci/pci-malta.c
@@ -230,8 +230,7 @@ void __init mips_pcibios_init(void)
     }
 
     /* PIIX4 ACPI starts at 0x1000 */
-    if (controller->io_resource->start < 0x00001000UL)
-        controller->io_resource->start = 0x00001000UL;
+    PCIBIOS_MIN_IO = 0x1000;
 
     iomem_resource.end &= 0xfffffffffULL;    /* 64 GB */
     ioport_resource.end = controller->io_resource->end;
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index 8bd2a11382a3..ac28066bb564 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -84,15 +84,9 @@
 .endm
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_32BIT
-#define PER_CPU_OFFSET_SHIFT 2
-#else
-#define PER_CPU_OFFSET_SHIFT 3
-#endif
-
 .macro asm_per_cpu dst sym tmp
     lw    \tmp, TASK_TI_CPU_NUM(tp)
-    slli  \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+    slli  \tmp, \tmp, RISCV_LGPTR
     la    \dst, __per_cpu_offset
     add   \dst, \dst, \tmp
     REG_L \tmp, 0(\dst)
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index fbd0e4306c93..62837fa981e8 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -31,6 +31,8 @@ struct riscv_isainfo {
 
 DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
 
+extern const struct seq_operations cpuinfo_op;
+
 /* Per-cpu ISA extensions. */
 extern struct riscv_isainfo hart_isa[NR_CPUS];
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 948d2b34e94e..58f8dda73259 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -42,4 +42,11 @@ static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
     return pair->value == other_pair->value;
 }
 
+#ifdef CONFIG_MMU
+void riscv_hwprobe_register_async_probe(void);
+void riscv_hwprobe_complete_async_probe(void);
+#else
+static inline void riscv_hwprobe_register_async_probe(void) {}
+static inline void riscv_hwprobe_complete_async_probe(void) {}
+#endif
 #endif
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 1018d2216901..6e789fa58514 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -69,6 +69,8 @@ typedef struct {
 
 #define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
 
+#define MAX_POSSIBLE_PHYSMEM_BITS 56
+
 /*
  * rv64 PTE format:
  * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 29e994a9afb6..5a08eb5fe99f 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -654,6 +654,8 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
     return __pgprot(prot);
 }
 
+#define pgprot_dmacoherent pgprot_writecombine
+
 /*
  * Both Svade and Svadu control the hardware behavior when the PTE A/D bits need to be set. By
  * default the M-mode firmware enables the hardware updating scheme when only Svadu is present in
diff --git a/arch/riscv/include/asm/vdso/arch_data.h b/arch/riscv/include/asm/vdso/arch_data.h
index da57a3786f7a..88b37af55175 100644
--- a/arch/riscv/include/asm/vdso/arch_data.h
+++ b/arch/riscv/include/asm/vdso/arch_data.h
@@ -12,6 +12,12 @@ struct vdso_arch_data {
 
     /* Boolean indicating all CPUs have the same static hwprobe values. */
     __u8 homogeneous_cpus;
+
+    /*
+     * A gate to check and see if the hwprobe data is actually ready, as
+     * probing is deferred to avoid boot slowdowns.
+     */
+    __u8 ready;
 };
 
 #endif /* __RISCV_ASM_VDSO_ARCH_DATA_H */
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index f6b13e9f5e6c..3dbc8cc557dd 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -62,10 +62,8 @@ int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned lo
         return -ENODEV;
     }
 
-    if (!of_device_is_available(node)) {
-        pr_info("CPU with hartid=%lu is not available\n", *hart);
+    if (!of_device_is_available(node))
         return -ENODEV;
-    }
 
     if (of_property_read_string(node, "riscv,isa-base", &isa))
         goto old_interface;
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 67b59699357d..72ca768f4e91 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -932,9 +932,9 @@ static int has_thead_homogeneous_vlenb(void)
 {
     int cpu;
     u32 prev_vlenb = 0;
-    u32 vlenb;
+    u32 vlenb = 0;
 
-    /* Ignore thead,vlenb property if xtheavector is not enabled in the kernel */
+    /* Ignore thead,vlenb property if xtheadvector is not enabled in the kernel */
     if (!IS_ENABLED(CONFIG_RISCV_ISA_XTHEADVECTOR))
         return 0;
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index e650dec44817..5ed5095320e6 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -40,6 +40,17 @@ enum ipi_message_type {
     IPI_MAX
 };
 
+static const char * const ipi_names[] = {
+    [IPI_RESCHEDULE]    = "Rescheduling interrupts",
+    [IPI_CALL_FUNC]     = "Function call interrupts",
+    [IPI_CPU_STOP]      = "CPU stop interrupts",
+    [IPI_CPU_CRASH_STOP]    = "CPU stop (for crash dump) interrupts",
+    [IPI_IRQ_WORK]      = "IRQ work interrupts",
+    [IPI_TIMER]     = "Timer broadcast interrupts",
+    [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
+    [IPI_KGDB_ROUNDUP]  = "KGDB roundup interrupts",
+};
+
 unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
     [0 ... NR_CPUS-1] = INVALID_HARTID
 };
@@ -199,7 +210,7 @@ void riscv_ipi_set_virq_range(int virq, int nr)
     /* Request IPIs */
     for (i = 0; i < nr_ipi; i++) {
         err = request_percpu_irq(ipi_virq_base + i, handle_IPI,
-                     "IPI", &ipi_dummy_dev);
+                     ipi_names[i], &ipi_dummy_dev);
         WARN_ON(err);
 
         ipi_desc[i] = irq_to_desc(ipi_virq_base + i);
@@ -210,17 +221,6 @@ void riscv_ipi_set_virq_range(int virq, int nr)
     riscv_ipi_enable();
 }
 
-static const char * const ipi_names[] = {
-    [IPI_RESCHEDULE]    = "Rescheduling interrupts",
-    [IPI_CALL_FUNC]     = "Function call interrupts",
-    [IPI_CPU_STOP]      = "CPU stop interrupts",
-    [IPI_CPU_CRASH_STOP]    = "CPU stop (for crash dump) interrupts",
-    [IPI_IRQ_WORK]      = "IRQ work interrupts",
-    [IPI_TIMER]     = "Timer broadcast interrupts",
-    [IPI_CPU_BACKTRACE] = "CPU backtrace interrupts",
-    [IPI_KGDB_ROUNDUP]  = "KGDB roundup interrupts",
-};
-
 void show_ipi_stats(struct seq_file *p, int prec)
 {
     unsigned int cpu, i;
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 000f4451a9d8..199d13f86f31 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -5,6 +5,9 @@
 * more details.
 */
 #include 
+#include 
+#include 
+#include 
 #include 
 #include 
 #include 
@@ -28,6 +31,11 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
     bool first = true;
     int cpu;
 
+    if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
+        pair->key != RISCV_HWPROBE_KEY_MIMPID &&
+        pair->key != RISCV_HWPROBE_KEY_MARCHID)
+        goto out;
+
     for_each_cpu(cpu, cpus) {
         u64 cpu_id;
 
@@ -58,6 +66,7 @@ static void hwprobe_arch_id(struct riscv_hwprobe *pair,
         }
     }
 
+out:
     pair->value = id;
 }
 
@@ -454,28 +463,32 @@ static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
     return 0;
 }
 
-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
-                size_t pair_count, size_t cpusetsize,
-                unsigned long __user *cpus_user,
-                unsigned int flags)
-{
-    if (flags & RISCV_HWPROBE_WHICH_CPUS)
-        return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
-                    cpus_user, flags);
-
-    return hwprobe_get_values(pairs, pair_count, cpusetsize,
-                  cpus_user, flags);
-}
-
 #ifdef CONFIG_MMU
 
-static int __init init_hwprobe_vdso_data(void)
+static DECLARE_COMPLETION(boot_probes_done);
+static atomic_t pending_boot_probes = ATOMIC_INIT(1);
+
+void riscv_hwprobe_register_async_probe(void)
+{
+    atomic_inc(&pending_boot_probes);
+}
+
+void riscv_hwprobe_complete_async_probe(void)
+{
+    if (atomic_dec_and_test(&pending_boot_probes))
+        complete(&boot_probes_done);
+}
+
+static int complete_hwprobe_vdso_data(void)
 {
     struct vdso_arch_data *avd = vdso_k_arch_data;
     u64 id_bitsmash = 0;
     struct riscv_hwprobe pair;
     int key;
 
+    if (unlikely(!atomic_dec_and_test(&pending_boot_probes)))
+        wait_for_completion(&boot_probes_done);
+
     /*
      * Initialize vDSO data with the answers for the "all CPUs" case, to
      * save a syscall in the common case.
@@ -503,13 +516,52 @@ static int __init init_hwprobe_vdso_data(void)
      * vDSO should defer to the kernel for exotic cpu masks.
      */
     avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
+
+    /*
+     * Make sure all the VDSO values are visible before we look at them.
+     * This pairs with the implicit "no speculatively visible accesses"
+     * barrier in the VDSO hwprobe code.
+     */
+    smp_wmb();
+    avd->ready = true;
+    return 0;
+}
+
+static int __init init_hwprobe_vdso_data(void)
+{
+    struct vdso_arch_data *avd = vdso_k_arch_data;
+
+    /*
+     * Prevent the vDSO cached values from being used, as they're not ready
+     * yet.
+     */
+    avd->ready = false;
     return 0;
 }
 
 arch_initcall_sync(init_hwprobe_vdso_data);
 
+#else
+
+static int complete_hwprobe_vdso_data(void) { return 0; }
+
 #endif /* CONFIG_MMU */
 
+static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
+                size_t pair_count, size_t cpusetsize,
+                unsigned long __user *cpus_user,
+                unsigned int flags)
+{
+    DO_ONCE_SLEEPABLE(complete_hwprobe_vdso_data);
+
+    if (flags & RISCV_HWPROBE_WHICH_CPUS)
+        return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
+                    cpus_user, flags);
+
+    return hwprobe_get_values(pairs, pair_count, cpusetsize,
+                  cpus_user, flags);
+}
+
 SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
         size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
         cpus, unsigned int, flags)
diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index ae2068425fbc..70b5e6927620 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -379,6 +379,7 @@ static void check_vector_unaligned_access(struct work_struct *work __always_unus
 static int __init vec_check_unaligned_access_speed_all_cpus(void *unused __always_unused)
 {
     schedule_on_each_cpu(check_vector_unaligned_access);
+    riscv_hwprobe_complete_async_probe();
 
     return 0;
 }
@@ -473,8 +474,12 @@ static int __init check_unaligned_access_all_cpus(void)
             per_cpu(vector_misaligned_access, cpu) = unaligned_vector_speed_param;
     } else if (!check_vector_unaligned_access_emulated_all_cpus() &&
            IS_ENABLED(CONFIG_RISCV_PROBE_VECTOR_UNALIGNED_ACCESS)) {
-        kthread_run(vec_check_unaligned_access_speed_all_cpus,
-                NULL, "vec_check_unaligned_access_speed_all_cpus");
+        riscv_hwprobe_register_async_probe();
+        if (IS_ERR(kthread_run(vec_check_unaligned_access_speed_all_cpus,
+                       NULL, "vec_check_unaligned_access_speed_all_cpus"))) {
+            pr_warn("Failed to create vec_unalign_check kthread\n");
+            riscv_hwprobe_complete_async_probe();
+        }
     }
 
     /*
diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
index 2ddeba6c68dd..8f45500d0a6e 100644
--- a/arch/riscv/kernel/vdso/hwprobe.c
+++ b/arch/riscv/kernel/vdso/hwprobe.c
@@ -27,7 +27,7 @@ static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
      * homogeneous, then this function can handle requests for arbitrary
      * masks.
      */
-    if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
+    if (flags != 0 || (!all_cpus && !avd->homogeneous_cpus) || unlikely(!avd->ready))
         return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
 
     /* This is something we can handle, fill out the pairs. */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6a526ae1fe99..d7fa03bf51b4 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1463,7 +1463,9 @@ static void __init retbleed_update_mitigation(void)
         break;
     default:
         if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF) {
-            pr_err(RETBLEED_INTEL_MSG);
+            if (retbleed_mitigation != RETBLEED_MITIGATION_NONE)
+                pr_err(RETBLEED_INTEL_MSG);
+
             retbleed_mitigation = RETBLEED_MITIGATION_NONE;
         }
     }
@@ -1825,13 +1827,6 @@ void unpriv_ebpf_notify(int new_state)
 }
 #endif
 
-static inline bool match_option(const char *arg, int arglen, const char *opt)
-{
-    int len = strlen(opt);
-
-    return len == arglen && !strncmp(arg, opt, len);
-}
-
 /* The kernel command line selection for spectre v2 */
 enum spectre_v2_mitigation_cmd {
     SPECTRE_V2_CMD_NONE,
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index cdce885e2fd5..28ed8c089024 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -194,7 +194,7 @@ static bool need_sha_check(u32 cur_rev)
     }
 
     switch (cur_rev >> 8) {
-    case 0x80012: return cur_rev <= 0x800126f; break;
+    case 0x80012: return cur_rev <= 0x8001277; break;
     case 0x80082: return cur_rev <= 0x800820f; break;
     case 0x83010: return cur_rev <= 0x830107c; break;
     case 0x86001: return cur_rev <= 0x860010e; break;
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 2cd25a0d4637..fe1a2aa53c16 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -458,7 +458,16 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r)
         r->mon.mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;
     }
 
-    if (rdt_cpu_has(X86_FEATURE_ABMC)) {
+    /*
+     * resctrl assumes a system that supports assignable counters can
+     * switch to "default" mode. Ensure that there is a "default" mode
+     * to switch to. This enforces a dependency between the independent
+     * X86_FEATURE_ABMC and X86_FEATURE_CQM_MBM_TOTAL/X86_FEATURE_CQM_MBM_LOCAL
+     * hardware features.
+     */
+    if (rdt_cpu_has(X86_FEATURE_ABMC) &&
+        (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL) ||
+         rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))) {
         r->mon.mbm_cntr_assignable = true;
         cpuid_count(0x80000020, 5, &eax, &ebx, &ecx, &edx);
         r->mon.num_mbm_cntrs = (ebx & GENMASK(15, 0)) + 1;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 54cffaae4df4..d74b13ec8e54 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -184,6 +184,16 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
     if (!bi->interval_exp)
         bi->interval_exp = ilog2(lim->logical_block_size);
 
+    /*
+     * The PI generation / validation helpers do not expect intervals to
+     * straddle multiple bio_vecs. Enforce alignment so that those are
+     * never generated, and that each buffer is aligned as expected.
+     */
+    if (bi->csum_type) {
+        lim->dma_alignment = max(lim->dma_alignment,
+                     (1U << bi->interval_exp) - 1);
+    }
+
     return 0;
 }
diff --git a/drivers/acpi/riscv/rimt.c b/drivers/acpi/riscv/rimt.c
index 683fcfe35c31..7f423405e5ef 100644
--- a/drivers/acpi/riscv/rimt.c
+++ b/drivers/acpi/riscv/rimt.c
@@ -61,30 +61,6 @@ static int rimt_set_fwnode(struct acpi_rimt_node *rimt_node,
     return 0;
 }
 
-/**
- * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
- *
- * @node: RIMT table node to be looked-up
- *
- * Returns: fwnode_handle pointer on success, NULL on failure
- */
-static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
-{
-    struct fwnode_handle *fwnode = NULL;
-    struct rimt_fwnode *curr;
-
-    spin_lock(&rimt_fwnode_lock);
-    list_for_each_entry(curr, &rimt_fwnode_list, list) {
-        if (curr->rimt_node == node) {
-            fwnode = curr->fwnode;
-            break;
-        }
-    }
-    spin_unlock(&rimt_fwnode_lock);
-
-    return fwnode;
-}
-
 static acpi_status rimt_match_node_callback(struct acpi_rimt_node *node,
                         void *context)
 {
@@ -202,6 +178,67 @@ static struct acpi_rimt_node *rimt_scan_node(enum acpi_rimt_node_type type,
     return NULL;
 }
 
+/*
+ * RISC-V supports IOMMU as a PCI device or a platform device.
+ * When it is a platform device, there should be a namespace device as
+ * well along with RIMT. To create the link between RIMT information and
+ * the platform device, the IOMMU driver should register itself with the
+ * RIMT module. This is true for PCI based IOMMU as well.
+ */
+int rimt_iommu_register(struct device *dev)
+{
+    struct fwnode_handle *rimt_fwnode;
+    struct acpi_rimt_node *node;
+
+    node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
+    if (!node) {
+        pr_err("Could not find IOMMU node in RIMT\n");
+        return -ENODEV;
+    }
+
+    if (dev_is_pci(dev)) {
+        rimt_fwnode = acpi_alloc_fwnode_static();
+        if (!rimt_fwnode)
+            return -ENOMEM;
+
+        rimt_fwnode->dev = dev;
+        if (!dev->fwnode)
+            dev->fwnode = rimt_fwnode;
+
+        rimt_set_fwnode(node, rimt_fwnode);
+    } else {
+        rimt_set_fwnode(node, dev->fwnode);
+    }
+
+    return 0;
+}
+
+#ifdef CONFIG_IOMMU_API
+
+/**
+ * rimt_get_fwnode() - Retrieve fwnode associated with an RIMT node
+ *
+ * @node: RIMT table node to be looked-up
+ *
+ * Returns: fwnode_handle pointer on success, NULL on failure
+ */
+static struct fwnode_handle *rimt_get_fwnode(struct acpi_rimt_node *node)
+{
+    struct fwnode_handle *fwnode = NULL;
+    struct rimt_fwnode *curr;
+
+    spin_lock(&rimt_fwnode_lock);
+    list_for_each_entry(curr, &rimt_fwnode_list, list) {
+        if (curr->rimt_node == node) {
+            fwnode = curr->fwnode;
+            break;
+        }
+    }
+    spin_unlock(&rimt_fwnode_lock);
+
+    return fwnode;
+}
+
 static bool rimt_pcie_rc_supports_ats(struct acpi_rimt_node *node)
 {
     struct acpi_rimt_pcie_rc *pci_rc;
@@ -290,43 +327,6 @@ static struct acpi_rimt_node *rimt_node_get_id(struct acpi_rimt_node *node,
     return NULL;
 }
 
-/*
- * RISC-V supports IOMMU as a PCI device or a platform device.
- * When it is a platform device, there should be a namespace device as
- * well along with RIMT. To create the link between RIMT information and
- * the platform device, the IOMMU driver should register itself with the
- * RIMT module. This is true for PCI based IOMMU as well.
- */
-int rimt_iommu_register(struct device *dev)
-{
-    struct fwnode_handle *rimt_fwnode;
-    struct acpi_rimt_node *node;
-
-    node = rimt_scan_node(ACPI_RIMT_NODE_TYPE_IOMMU, dev);
-    if (!node) {
-        pr_err("Could not find IOMMU node in RIMT\n");
-        return -ENODEV;
-    }
-
-    if (dev_is_pci(dev)) {
-        rimt_fwnode = acpi_alloc_fwnode_static();
-        if (!rimt_fwnode)
-            return -ENOMEM;
-
-        rimt_fwnode->dev = dev;
-        if (!dev->fwnode)
-            dev->fwnode = rimt_fwnode;
-
-        rimt_set_fwnode(node, rimt_fwnode);
-    } else {
-        rimt_set_fwnode(node, dev->fwnode);
-    }
-
-    return 0;
-}
-
-#ifdef CONFIG_IOMMU_API
-
 static struct acpi_rimt_node *rimt_node_map_id(struct acpi_rimt_node *node,
                            u32 id_in, u32 *id_out,
                            u8 type_mask)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8c99ceaa303b..a3a1b5c33ba3 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -851,17 +851,8 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
     } else {
         if (!internal)
             node->local_weak_refs++;
-        if (!node->has_weak_ref && list_empty(&node->work.entry)) {
-            if (target_list == NULL) {
-                pr_err("invalid inc weak node for %d\n",
-                       node->debug_id);
-                return -EINVAL;
-            }
-            /*
-             * See comment above
-             */
+        if (!node->has_weak_ref && target_list && list_empty(&node->work.entry))
             binder_enqueue_work_ilocked(&node->work, target_list);
-        }
     }
     return 0;
 }
@@ -2418,10 +2409,10 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
 
 /**
  * struct binder_ptr_fixup - data to be fixed-up in target buffer
- * @offset offset in target buffer to fixup
- * @skip_size bytes to skip in copy (fixup will be written later)
- * @fixup_data data to write at fixup offset
- * @node list node
+ * @offset:     offset in target buffer to fixup
+ * @skip_size:  bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node:       list node
 *
 * This is used for the pointer fixup list (pf) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
@@ -2438,10 +2429,10 @@ struct binder_ptr_fixup {
 
 /**
  * struct binder_sg_copy - scatter-gather data to be copied
- * @offset offset in target buffer
- * @sender_uaddr user address in source buffer
- * @length bytes to copy
- * @node list node
+ * @offset:       offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length:       bytes to copy
+ * @node:         list node
 *
 * This is used for the sg copy list (sgc) which is created and consumed
 * during binder_transaction() and is only accessed locally. No
@@ -4063,14 +4054,15 @@ binder_freeze_notification_done(struct binder_proc *proc,
 
 /**
  * binder_free_buf() - free the specified buffer
- * @proc:       binder proc that owns buffer
- * @buffer:     buffer to be freed
- * @is_failure: failed to send transaction
+ * @proc:       binder proc that owns buffer
+ * @thread:     binder thread performing the buffer release
+ * @buffer:     buffer to be freed
+ * @is_failure: failed to send transaction
 *
- * If buffer for an async transaction, enqueue the next async
+ * If the buffer is for an async transaction, enqueue the next async
 * transaction from the node.
 *
- * Cleanup buffer and free it.
+ * Cleanup the buffer and free it.
 */
 static void
 binder_free_buf(struct binder_proc *proc,
diff --git a/drivers/android/binder/freeze.rs b/drivers/android/binder/freeze.rs
index e68c3c8bc55a..220de35ae85a 100644
--- a/drivers/android/binder/freeze.rs
+++ b/drivers/android/binder/freeze.rs
@@ -106,13 +106,22 @@ fn do_work(
             return Ok(true);
         }
         if freeze.is_clearing {
-            _removed_listener = freeze_entry.remove_node();
+            kernel::warn_on!(freeze.num_cleared_duplicates != 0);
+            if freeze.num_pending_duplicates > 0 {
+                // The primary freeze listener was deleted, so convert a pending duplicate back
+                // into the primary one.
+                freeze.num_pending_duplicates -= 1;
+                freeze.is_pending = true;
+                freeze.is_clearing = true;
+            } else {
+                _removed_listener = freeze_entry.remove_node();
+            }
             drop(node_refs);
             writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
             writer.write_payload(&self.cookie.0)?;
             Ok(true)
         } else {
-            let is_frozen = freeze.node.owner.inner.lock().is_frozen;
+            let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
             if freeze.last_is_frozen == Some(is_frozen) {
                 return Ok(true);
             }
@@ -245,8 +254,9 @@ pub(crate) fn freeze_notif_done(self: &Arc, reader: &mut UserSliceReader)
             );
             return Err(EINVAL);
         }
-        if freeze.is_clearing {
-            // Immediately send another FreezeMessage for BR_CLEAR_FREEZE_NOTIFICATION_DONE.
+        let is_frozen = freeze.node.owner.inner.lock().is_frozen.is_fully_frozen();
+        if freeze.is_clearing || freeze.last_is_frozen != Some(is_frozen) {
+            // Immediately send another FreezeMessage.
             clear_msg = Some(FreezeMessage::init(alloc, cookie));
         }
         freeze.is_pending = false;
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
index ade895ef791e..08d362deaf61 100644
--- a/drivers/android/binder/node.rs
+++ b/drivers/android/binder/node.rs
@@ -687,7 +687,7 @@ pub(crate) fn remove_freeze_listener(&self, p: &Arc) {
             );
         }
         if inner.freeze_list.is_empty() {
-            _unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new());
+            _unused_capacity = mem::take(&mut inner.freeze_list);
         }
     }
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index f13a747e784c..7607353a5e92 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -72,6 +72,33 @@ fn new(address: usize, size: usize) -> Self {
 const PROC_DEFER_FLUSH: u8 = 1;
 const PROC_DEFER_RELEASE: u8 = 2;
 
+#[derive(Copy, Clone)]
+pub(crate) enum IsFrozen {
+    Yes,
+    No,
+    InProgress,
+}
+
+impl IsFrozen {
+    /// Whether incoming transactions should be rejected due to freeze.
+    pub(crate) fn is_frozen(self) -> bool {
+        match self {
+            IsFrozen::Yes => true,
+            IsFrozen::No => false,
+            IsFrozen::InProgress => true,
+        }
+    }
+
+    /// Whether freeze notifications consider this process frozen.
+    pub(crate) fn is_fully_frozen(self) -> bool {
+        match self {
+            IsFrozen::Yes => true,
+            IsFrozen::No => false,
+            IsFrozen::InProgress => false,
+        }
+    }
+}
+
 /// The fields of `Process` protected by the spinlock.
 pub(crate) struct ProcessInner {
     is_manager: bool,
@@ -98,7 +125,7 @@ pub(crate) struct ProcessInner {
     /// are woken up.
     outstanding_txns: u32,
     /// Process is frozen and unable to service binder transactions.
-    pub(crate) is_frozen: bool,
+    pub(crate) is_frozen: IsFrozen,
     /// Process received sync transactions since last frozen.
     pub(crate) sync_recv: bool,
     /// Process received async transactions since last frozen.
@@ -124,7 +151,7 @@ fn new() -> Self {
             started_thread_count: 0,
             defer_work: 0,
             outstanding_txns: 0,
-            is_frozen: false,
+            is_frozen: IsFrozen::No,
             sync_recv: false,
             async_recv: false,
             binderfs_file: None,
@@ -1260,7 +1287,7 @@ fn deferred_release(self: Arc) {
         let is_manager = {
             let mut inner = self.inner.lock();
             inner.is_dead = true;
-            inner.is_frozen = false;
+            inner.is_frozen = IsFrozen::No;
             inner.sync_recv = false;
             inner.async_recv = false;
             inner.is_manager
@@ -1346,10 +1373,6 @@ fn deferred_release(self: Arc) {
             .alloc
             .take_for_each(|offset, size, debug_id, odata| {
                 let ptr = offset + address;
-                pr_warn!(
-                    "{}: removing orphan mapping {offset}:{size}\n",
-                    self.pid_in_current_ns()
-                );
                 let mut alloc =
                     Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
                 if let Some(data) = odata {
@@ -1371,7 +1394,7 @@ pub(crate) fn drop_outstanding_txn(&self) {
                 return;
             }
             inner.outstanding_txns -= 1;
-            inner.is_frozen && inner.outstanding_txns == 0
+            inner.is_frozen.is_frozen() && inner.outstanding_txns == 0
         };
 
         if wake {
@@ -1385,7 +1408,7 @@ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
             let mut inner = self.inner.lock();
             inner.sync_recv = false;
             inner.async_recv = false;
-            inner.is_frozen = false;
+            inner.is_frozen = IsFrozen::No;
             drop(inner);
             msgs.send_messages();
             return Ok(());
@@ -1394,7 +1417,7 @@ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
         let mut inner = self.inner.lock();
         inner.sync_recv = false;
         inner.async_recv = false;
-        inner.is_frozen = true;
+        inner.is_frozen = IsFrozen::InProgress;
 
         if info.timeout_ms > 0 {
             let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
@@ -1408,7 +1431,7 @@ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
                 .wait_interruptible_timeout(&mut inner, jiffies)
             {
                 CondVarTimeoutResult::Signal { .. } => {
-                    inner.is_frozen = false;
+                    inner.is_frozen = IsFrozen::No;
                     return Err(ERESTARTSYS);
                 }
                 CondVarTimeoutResult::Woken { jiffies: remaining } => {
@@ -1422,17 +1445,18 @@ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
         }
 
         if inner.txns_pending_locked() {
-            inner.is_frozen = false;
+            inner.is_frozen = IsFrozen::No;
             Err(EAGAIN)
         } else {
             drop(inner);
             match self.prepare_freeze_messages() {
                 Ok(batch) => {
+                    self.inner.lock().is_frozen = IsFrozen::Yes;
                     batch.send_messages();
                     Ok(())
                 }
                 Err(kernel::alloc::AllocError) => {
-                    self.inner.lock().is_frozen = false;
+                    self.inner.lock().is_frozen = IsFrozen::No;
                     Err(ENOMEM)
                 }
             }
diff --git a/drivers/android/binder/transaction.rs b/drivers/android/binder/transaction.rs
index 02512175d622..4bd3c0e417eb 100644
--- a/drivers/android/binder/transaction.rs
+++ b/drivers/android/binder/transaction.rs
@@ -249,7 +249,7 @@ pub(crate) fn submit(self: DLArc) -> BinderResult {
 
         if oneway {
             if let Some(target_node) = self.target_node.clone() {
-                if process_inner.is_frozen {
+                if process_inner.is_frozen.is_frozen() {
                     process_inner.async_recv = true;
                     if self.flags & TF_UPDATE_TXN != 0 {
                         if let Some(t_outdated) =
@@ -270,7 +270,7 @@ pub(crate) fn submit(self: DLArc) -> BinderResult {
                     }
                 }
 
-                if process_inner.is_frozen {
+                if process_inner.is_frozen.is_frozen() {
                     return Err(BinderError::new_frozen_oneway());
                 } else {
                     return Ok(());
@@ -280,7 +280,7 @@ pub(crate) fn submit(self: DLArc) -> BinderResult {
             }
         }
 
-        if process_inner.is_frozen {
+        if process_inner.is_frozen.is_frozen() {
             process_inner.sync_recv = true;
             return Err(BinderError::new_frozen());
         }
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 1037169abb45..e1eff05bea4a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -292,7 +292,7 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
          * frequency (by keeping the initial capacity_freq_ref value).
          */
         cpu_clk = of_clk_get(cpu_node, 0);
-        if (!PTR_ERR_OR_ZERO(cpu_clk)) {
+        if (!IS_ERR_OR_NULL(cpu_clk)) {
             per_cpu(capacity_freq_ref, cpu) =
                 clk_get_rate(cpu_clk) / HZ_PER_KHZ;
             clk_put(cpu_clk);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 3c533dab8fa5..f69dc9c85954 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1784,7 +1784,7 @@ static int fw_devlink_dev_sync_state(struct device *dev, void *data)
         return 0;
 
     if (fw_devlink_sync_state == FW_DEVLINK_SYNC_STATE_STRICT) {
-        dev_warn(sup, "sync_state() pending due to %s\n",
+        dev_info(sup, "sync_state() pending due to %s\n",
              dev_name(link->consumer));
         return 0;
     }
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
index 37faf6156d7c..55bdc7f5e59d 100644
--- a/drivers/base/devcoredump.c
+++ b/drivers/base/devcoredump.c
@@ -23,50 +23,46 @@ struct devcd_entry {
     void *data;
     size_t datalen;
     /*
-     * Here, mutex is required to serialize the calls to del_wk work between
-     * user/kernel space which happens when devcd is added with device_add()
-     * and that sends uevent to user space. User space reads the uevents,
-     * and calls to devcd_data_write() which try to modify the work which is
-     * not even initialized/queued from devcoredump.
+     * There are 2 races for which mutex is required.
      *
+     * The first race is between device creation and userspace writing to
+     * schedule immediately destruction.
      *
+     * This race is handled by arming the timer before device creation, but
+     * when device creation fails the timer still exists.
      *
-     *        cpu0(X)                                 cpu1(Y)
+     * To solve this, hold the mutex during device_add(), and set
+     * init_completed on success before releasing the mutex.
      *
-     * dev_coredump() uevent sent to user space
-     * device_add()  ======================> user space process Y reads the
-     *                                       uevents writes to devcd fd
-     *                                       which results into writes to
+     * That way the timer will never fire until device_add() is called,
+     * it will do nothing if init_completed is not set. The timer is also
+     * cancelled in that case.
      *
-     *                                       devcd_data_write()
-     *                                         mod_delayed_work()
-     *                                           try_to_grab_pending()
-     *                                             timer_delete()
-     *                                               debug_assert_init()
-     *                                       INIT_DELAYED_WORK()
-     *                                       schedule_delayed_work()
-     *
-     *
-     * Also, mutex alone would not be enough to avoid scheduling of
-     * del_wk work after it get flush from a call to devcd_free()
-     * mentioned as below.
-     *
-     *    disabled_store()
-     *        devcd_free()
-     *          mutex_lock()             devcd_data_write()
-     *            flush_delayed_work()
-     *          mutex_unlock()
-     *                                   mutex_lock()
-     *                                     mod_delayed_work()
-     *                                   mutex_unlock()
-     * So, delete_work flag is required.
+     * The second race involves multiple parallel invocations of devcd_free(),
+     * add a deleted flag so only 1 can call the destructor.
      */
     struct mutex mutex;
-    bool delete_work;
+    bool init_completed, deleted;
     struct module *owner;
     ssize_t (*read)(char *buffer, loff_t offset, size_t count,
             void *data, size_t datalen);
     void (*free)(void *data);
+    /*
+     * If nothing interferes and device_add() returns success,
+     * del_wk will destroy the device after the timer fires.
+     *
+     * Multiple userspace processes can interfere in the working of the timer:
+     * - Writing to the coredump will reschedule the timer to run immediately,
+     *   if still armed.
+     *
+     *   This is handled by using "if (cancel_delayed_work()) {
+     *   schedule_delayed_work() }", to prevent re-arming after having
+     *   been previously fired.
+     * - Writing to /sys/class/devcoredump/disabled will destroy the
+     *   coredump synchronously.
+     *   This is handled by using disable_delayed_work_sync(), and then
+     *   checking if deleted flag is set with &devcd->mutex held.
+     */
     struct delayed_work del_wk;
     struct device *failing_dev;
 };
@@ -95,14 +91,27 @@ static void devcd_dev_release(struct device *dev)
     kfree(devcd);
 }
 
+static void __devcd_del(struct devcd_entry *devcd)
+{
+    devcd->deleted = true;
+    device_del(&devcd->devcd_dev);
+    put_device(&devcd->devcd_dev);
+}
+
 static void devcd_del(struct work_struct *wk)
 {
     struct devcd_entry *devcd;
+    bool init_completed;
 
     devcd = container_of(wk, struct devcd_entry, del_wk.work);
 
-    device_del(&devcd->devcd_dev);
-    put_device(&devcd->devcd_dev);
+    /* devcd->mutex serializes against dev_coredumpm_timeout */
+    mutex_lock(&devcd->mutex);
+    init_completed = devcd->init_completed;
+    mutex_unlock(&devcd->mutex);
+
+    if (init_completed)
+        __devcd_del(devcd);
 }
 
 static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
@@ -122,12 +131,12 @@ static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
     struct device *dev = kobj_to_dev(kobj);
     struct devcd_entry *devcd = dev_to_devcd(dev);
 
-    mutex_lock(&devcd->mutex);
-    if (!devcd->delete_work) {
-        devcd->delete_work = true;
-        mod_delayed_work(system_wq, &devcd->del_wk, 0);
-    }
-    mutex_unlock(&devcd->mutex);
+    /*
+     * Although it's tempting to use mod_delayed_work() here,
+     * that will cause a reschedule if the timer already fired.
+     */
+    if (cancel_delayed_work(&devcd->del_wk))
+        schedule_delayed_work(&devcd->del_wk, 0);
 
     return count;
 }
@@ -151,11 +160,21 @@ static int devcd_free(struct device *dev, void *data)
 {
     struct devcd_entry *devcd = dev_to_devcd(dev);
 
+    /*
+     * To prevent a race with devcd_data_write(), disable work and
+     * complete manually instead.
+     *
+     * We cannot rely on the return value of
+     * disable_delayed_work_sync() here, because it might be in the
+     * middle of a cancel_delayed_work + schedule_delayed_work pair.
+     *
+     * devcd->mutex here guards against multiple parallel invocations
+     * of devcd_free().
+     */
+    disable_delayed_work_sync(&devcd->del_wk);
     mutex_lock(&devcd->mutex);
-    if (!devcd->delete_work)
-        devcd->delete_work = true;
-
-    flush_delayed_work(&devcd->del_wk);
+    if (!devcd->deleted)
+        __devcd_del(devcd);
     mutex_unlock(&devcd->mutex);
     return 0;
 }
@@ -179,12 +198,10 @@ static ssize_t disabled_show(const struct class *class, const struct class_attri
 *                                                 put_device() <- last reference
 *             error = fn(dev, data)           devcd_dev_release()
 *                        devcd_free(dev, data)       kfree(devcd)
-*          mutex_lock(&devcd->mutex);
 *
 *
 * In the above diagram, it looks like disabled_store() would be racing with parallelly
- * running devcd_del() and result in memory abort while acquiring devcd->mutex which
- * is called after kfree of devcd memory after dropping its last reference with
+ * running devcd_del() and result in memory abort after dropping its last reference with
 * put_device(). However, this will not happens as fn(dev, data) runs
 * with its own reference to device via klist_node so it is not its last reference.
 * so, above situation would not occur.
@@ -374,7 +391,7 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
     devcd->read = read;
     devcd->free = free;
     devcd->failing_dev = get_device(dev);
-    devcd->delete_work = false;
+    devcd->deleted = false;
 
     mutex_init(&devcd->mutex);
     device_initialize(&devcd->devcd_dev);
@@ -383,8 +400,14 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
              atomic_inc_return(&devcd_count));
     devcd->devcd_dev.class = &devcd_class;
 
-    mutex_lock(&devcd->mutex);
     dev_set_uevent_suppress(&devcd->devcd_dev, true);
+
+    /* devcd->mutex prevents devcd_del() completing until init finishes */
+    mutex_lock(&devcd->mutex);
+    devcd->init_completed = false;
+    INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+    schedule_delayed_work(&devcd->del_wk, timeout);
+
     if (device_add(&devcd->devcd_dev))
         goto put_device;
 
@@ -401,13 +424,20 @@ void dev_coredumpm_timeout(struct device *dev, struct module *owner,
 
     dev_set_uevent_suppress(&devcd->devcd_dev, false);
     kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD);
-    INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
-    schedule_delayed_work(&devcd->del_wk, timeout);
+
+    /*
+     * Safe to run devcd_del() now that we are done with devcd_dev.
+     * Alternatively we could have taken a ref on devcd_dev before
+     * dropping the lock.
+     */
+    devcd->init_completed = true;
     mutex_unlock(&devcd->mutex);
+
     return;
 
  put_device:
-    put_device(&devcd->devcd_dev);
     mutex_unlock(&devcd->mutex);
+    cancel_delayed_work_sync(&devcd->del_wk);
+    put_device(&devcd->devcd_dev);
+
  put_module:
     module_put(owner);
  free:
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 6ecfc821cf83..72f045e6ed51 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
     int err;
 
     list_for_each_entry(core, &bus->cores, list) {
+        struct device_node *np;
+
         /* We support that core ourselves */
         switch (core->id.id) {
         case BCMA_CORE_4706_CHIPCOMMON:
@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
         if (bcma_is_core_needed_early(core->id.id))
             continue;
 
+        np = core->dev.of_node;
+        if (np && !of_device_is_available(np))
+            continue;
+
         /* Only first GMAC core on BCM4706 is connected and working */
         if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
             core->core_unit > 0)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1188f32a5e5e..a853c65ac65d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -52,6 +52,7 @@ static DEFINE_IDR(nbd_index_idr);
 static DEFINE_MUTEX(nbd_index_mutex);
 static struct workqueue_struct *nbd_del_wq;
+static struct cred *nbd_cred;
 static int nbd_total_devices = 0;
 
 struct nbd_sock {
@@ -554,6 +555,7 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
     int result;
     struct msghdr msg = {};
     unsigned int noreclaim_flag;
+    const struct cred *old_cred;
 
     if (unlikely(!sock)) {
         dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -562,6 +564,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
         return -EINVAL;
     }
 
+    old_cred = override_creds(nbd_cred);
+
     msg.msg_iter = *iter;
 
     noreclaim_flag = memalloc_noreclaim_save();
@@ -586,6 +590,8 @@ static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
 
     memalloc_noreclaim_restore(noreclaim_flag);
 
+    revert_creds(old_cred);
+
     return result;
 }
 
@@ -2677,7 +2683,15 @@ static int __init nbd_init(void)
         return -ENOMEM;
     }
 
+    nbd_cred = prepare_kernel_cred(&init_task);
+    if (!nbd_cred) {
+        destroy_workqueue(nbd_del_wq);
+        unregister_blkdev(NBD_MAJOR, "nbd");
+        return -ENOMEM;
+    }
+
     if (genl_register_family(&nbd_genl_family)) {
+        put_cred(nbd_cred);
         destroy_workqueue(nbd_del_wq);
         unregister_blkdev(NBD_MAJOR, "nbd");
         return -EINVAL;
@@ -2732,6 +2746,7 @@ static void __exit nbd_cleanup(void)
 
     /* Also wait for nbd_dev_remove_work() completes */
     destroy_workqueue(nbd_del_wq);
+    put_cred(nbd_cred);
 
     idr_destroy(&nbd_index_idr);
     unregister_blkdev(NBD_MAJOR, "nbd");
}
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index b7ba667a3d09..e305d04aac9d 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -41,6 +41,7 @@ struct bpa10x_data {
     struct usb_anchor rx_anchor;
 
     struct sk_buff *rx_skb[2];
+    struct hci_uart hu;
 };
 
 static void bpa10x_tx_complete(struct urb *urb)
@@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)
     if (urb->status == 0) {
         bool idx = usb_pipebulk(urb->pipe);
 
-        data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
+        data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],
                         urb->transfer_buffer,
                         urb->actual_length,
                         bpa10x_recv_pkts,
@@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,
     hci_set_drvdata(hdev, data);
 
     data->hdev = hdev;
+    data->hu.hdev = hdev;
 
     SET_HCIDEV_DEV(hdev, &intf->dev);
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
b/drivers/bluetooth/btintel_pcie.c index 6d3963bd56a9..a075d8ec4677 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -1467,11 +1467,6 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id) if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1) btintel_pcie_msix_gp1_handler(data); - /* This interrupt is triggered by the firmware after updating - * boot_stage register and image_response register - */ - if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0) - btintel_pcie_msix_gp0_handler(data); /* For TX */ if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) { @@ -1487,6 +1482,12 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id) btintel_pcie_msix_tx_handle(data); } + /* This interrupt is triggered by the firmware after updating + * boot_stage register and image_response register + */ + if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0) + btintel_pcie_msix_gp0_handler(data); + /* * Before sending the interrupt the HW disables it to prevent a nested * interrupt. This is done by writing 1 to the corresponding bit in diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 50abefba6d04..62db31bd6592 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -1270,6 +1270,12 @@ static void btmtksdio_reset(struct hci_dev *hdev) sdio_claim_host(bdev->func); + /* set drv_pmctrl if BT is closed before doing reset */ + if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) { + sdio_enable_func(bdev->func); + btmtksdio_drv_pmctrl(bdev); + } + sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL); skb_queue_purge(&bdev->txq); cancel_work_sync(&bdev->txrx_work); @@ -1285,6 +1291,12 @@ static void btmtksdio_reset(struct hci_dev *hdev) goto err; } + /* set fw_pmctrl back if BT is closed after doing reset */ + if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) { + btmtksdio_fw_pmctrl(bdev); + sdio_disable_func(bdev->func); + } + clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state); err: sdio_release_host(bdev->func); diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index d9b90ea2ad38..27aa48ff3ac2 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -79,6 +79,7 @@ struct btmtkuart_dev { u16 stp_dlen; const struct btmtkuart_data *data; + struct hci_uart hu; }; #define btmtkuart_is_standalone(bdev) \ @@ -368,7 +369,7 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count) sz_left -= adv; p_left += adv; - bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4, + bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4, sz_h4, mtk_recv_pkts, ARRAY_SIZE(mtk_recv_pkts)); if (IS_ERR(bdev->rx_skb)) { @@ -858,6 +859,7 @@ static int btmtkuart_probe(struct serdev_device *serdev) } bdev->hdev = hdev; + bdev->hu.hdev = hdev; hdev->bus = HCI_UART; hci_set_drvdata(hdev, bdev); diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index d5153fed0518..3b1e9224e965 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -212,6 +212,7 @@ struct btnxpuart_dev { struct ps_data psdata; struct btnxpuart_data *nxp_data; struct reset_control *pdn; + struct hci_uart hu; }; #define NXP_V1_FW_REQ_PKT 0xa5 @@ -1756,7 +1757,7 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev, ps_start_timer(nxpdev); - nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count, + nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count, nxp_recv_pkts, 
ARRAY_SIZE(nxp_recv_pkts)); if (IS_ERR(nxpdev->rx_skb)) { int err = PTR_ERR(nxpdev->rx_skb); @@ -1875,6 +1876,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev) reset_control_deassert(nxpdev->pdn); nxpdev->hdev = hdev; + nxpdev->hu.hdev = hdev; hdev->bus = HCI_UART; hci_set_drvdata(hdev, nxpdev); diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c index 2d40302409ff..94588676510f 100644 --- a/drivers/bluetooth/hci_ag6xx.c +++ b/drivers/bluetooth/hci_ag6xx.c @@ -105,7 +105,7 @@ static int ag6xx_recv(struct hci_uart *hu, const void *data, int count) if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; - ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count, + ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count, ag6xx_recv_pkts, ARRAY_SIZE(ag6xx_recv_pkts)); if (IS_ERR(ag6xx->rx_skb)) { diff --git a/drivers/bluetooth/hci_aml.c b/drivers/bluetooth/hci_aml.c index 707e90f80130..b1f32c5a8a3f 100644 --- a/drivers/bluetooth/hci_aml.c +++ b/drivers/bluetooth/hci_aml.c @@ -650,7 +650,7 @@ static int aml_recv(struct hci_uart *hu, const void *data, int count) struct aml_data *aml_data = hu->priv; int err; - aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count, + aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count, aml_recv_pkts, ARRAY_SIZE(aml_recv_pkts)); if (IS_ERR(aml_data->rx_skb)) { diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c index dbfe34664633..8d2b5e7f0d6a 100644 --- a/drivers/bluetooth/hci_ath.c +++ b/drivers/bluetooth/hci_ath.c @@ -191,7 +191,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count) { struct ath_struct *ath = hu->priv; - ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count, + ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count, ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts)); if (IS_ERR(ath->rx_skb)) { int err = PTR_ERR(ath->rx_skb); diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index f96617b85d87..fff845ed44e3 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -698,7 +698,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count) if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; - bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count, + bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count, bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts)); if (IS_ERR(bcm->rx_skb)) { int err = PTR_ERR(bcm->rx_skb); diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c index 9070e31a68bf..ec017df8572c 100644 --- a/drivers/bluetooth/hci_h4.c +++ b/drivers/bluetooth/hci_h4.c @@ -112,7 +112,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count) if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; - h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count, + h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count, h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts)); if (IS_ERR(h4->rx_skb)) { int err = PTR_ERR(h4->rx_skb); @@ -151,12 +151,12 @@ int __exit h4_deinit(void) return hci_uart_unregister_proto(&h4p); } -struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, +struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb, const unsigned char *buffer, int count, const struct h4_recv_pkt *pkts, int pkts_count) { - struct hci_uart *hu = hci_get_drvdata(hdev); u8 alignment = hu->alignment ? 
hu->alignment : 1; + struct hci_dev *hdev = hu->hdev; /* Check for error from previous call */ if (IS_ERR(skb)) diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 9b353c3d6442..1d6e09508f1f 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -972,7 +972,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count) if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; - intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count, + intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count, intel_recv_pkts, ARRAY_SIZE(intel_recv_pkts)); if (IS_ERR(intel->rx_skb)) { diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 7044c86325ce..6f4e25917b86 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -429,7 +429,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count) if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; - ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count, + ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count, ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts)); if (IS_ERR(ll->rx_skb)) { int err = PTR_ERR(ll->rx_skb); diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c index e08222395772..8767522ec4c6 100644 --- a/drivers/bluetooth/hci_mrvl.c +++ b/drivers/bluetooth/hci_mrvl.c @@ -264,9 +264,9 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count) !test_bit(STATE_FW_LOADED, &mrvl->flags)) return count; - mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count, - mrvl_recv_pkts, - ARRAY_SIZE(mrvl_recv_pkts)); + mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count, + mrvl_recv_pkts, + ARRAY_SIZE(mrvl_recv_pkts)); if (IS_ERR(mrvl->rx_skb)) { int err = PTR_ERR(mrvl->rx_skb); bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index cd7575c20f65..1e65b541f8ad 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -624,8 +624,8 @@ static int nokia_recv(struct hci_uart *hu, const void *data, int count) if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; - btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count, - nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts)); + btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count, + nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts)); if (IS_ERR(btdev->rx_skb)) { err = PTR_ERR(btdev->rx_skb); dev_err(dev, "Frame reassembly failed (%d)", err); diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 4cff4d9be313..888176b0faa9 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -1277,7 +1277,7 @@ static int qca_recv(struct hci_uart *hu, const void *data, int count) if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; - qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, + qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count, qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts)); if (IS_ERR(qca->rx_skb)) { int err = PTR_ERR(qca->rx_skb); diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index cbbe79b241ce..48ac7ca9334e 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -162,7 +162,7 @@ struct h4_recv_pkt { int h4_init(void); int h4_deinit(void); -struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, +struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb, const unsigned char 
*buffer, int count, const struct h4_recv_pkt *pkts, int pkts_count); #endif diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c index 002c0e76baff..c7c262a2d8ca 100644 --- a/drivers/comedi/comedi_buf.c +++ b/drivers/comedi/comedi_buf.c @@ -317,7 +317,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s, unsigned int count = 0; const unsigned int num_sample_bytes = comedi_bytes_per_sample(s); - if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) { + if (!s->munge || (async->cmd.flags & CMDF_RAWDATA) || async->cmd.chanlist_len == 0) { async->munge_count += num_bytes; return num_bytes; } diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c index 74c1f0ca95f2..a4153bcb6dcf 100644 --- a/drivers/dpll/dpll_netlink.c +++ b/drivers/dpll/dpll_netlink.c @@ -1559,16 +1559,18 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info) return -EMSGSIZE; } pin = dpll_pin_find_from_nlattr(info); - if (!IS_ERR(pin)) { - if (!dpll_pin_available(pin)) { - nlmsg_free(msg); - return -ENODEV; - } - ret = dpll_msg_add_pin_handle(msg, pin); - if (ret) { - nlmsg_free(msg); - return ret; - } + if (IS_ERR(pin)) { + nlmsg_free(msg); + return PTR_ERR(pin); + } + if (!dpll_pin_available(pin)) { + nlmsg_free(msg); + return -ENODEV; + } + ret = dpll_msg_add_pin_handle(msg, pin); + if (ret) { + nlmsg_free(msg); + return ret; } genlmsg_end(msg, hdr); @@ -1735,12 +1737,14 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info) } dpll = dpll_device_find_from_nlattr(info); - if (!IS_ERR(dpll)) { - ret = dpll_msg_add_dev_handle(msg, dpll); - if (ret) { - nlmsg_free(msg); - return ret; - } + if (IS_ERR(dpll)) { + nlmsg_free(msg); + return PTR_ERR(dpll); + } + ret = dpll_msg_add_dev_handle(msg, dpll); + if (ret) { + nlmsg_free(msg); + return ret; } genlmsg_end(msg, hdr); diff --git a/drivers/dpll/zl3073x/dpll.c b/drivers/dpll/zl3073x/dpll.c index 93dc93eec79e..f93f9a458324 100644 --- a/drivers/dpll/zl3073x/dpll.c +++ b/drivers/dpll/zl3073x/dpll.c @@ -1904,7 +1904,7 @@ zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll, } is_diff = zl3073x_out_is_diff(zldev, out); - is_enabled = zl3073x_out_is_enabled(zldev, out); + is_enabled = zl3073x_output_pin_is_enabled(zldev, index); } /* Skip N-pin if the corresponding input/output is differential */ diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index dd3656a0c1ff..c65f491c54d0 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -269,7 +269,7 @@ static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel, } static int allocate_tlabel(struct fw_card *card) -__must_hold(&card->transactions_lock) +__must_hold(&card->transactions.lock) { int tlabel; diff --git a/drivers/firewire/init_ohci1394_dma.c b/drivers/firewire/init_ohci1394_dma.c index 48b879e9e831..121f0c2f6401 100644 --- a/drivers/firewire/init_ohci1394_dma.c +++ b/drivers/firewire/init_ohci1394_dma.c @@ -167,6 +167,7 @@ static inline void __init init_ohci1394_initialize(struct ohci *ohci) /** * init_ohci1394_wait_for_busresets - wait until bus resets are completed + * @ohci: Pointer to the OHCI-1394 controller structure * * OHCI1394 initialization itself and any device going on- or offline * and any cable issue cause an IEEE1394 bus reset.
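Stepping back to the h4_recv_buf() signature change completed above: drivers that are not hci_uart protocols (bpa10x, btmtkuart, btnxpuart) can no longer let the helper recover state via hci_get_drvdata(), so each embeds a struct hci_uart and keeps its hdev pointer in sync. A sketch of that pattern follows; the demo_* names are hypothetical, and it assumes the driver-local hci_uart.h header that declares h4_recv_buf() and the H4_RECV_* packet macros.

```c
#include "hci_uart.h"	/* drivers/bluetooth local header: h4_recv_buf(), H4_RECV_* */

/* Hypothetical private data of a non-hci_uart driver using h4_recv_buf(). */
struct demo_dev {
	struct hci_dev *hdev;
	struct sk_buff *rx_skb;
	struct hci_uart hu;	/* embedded purely so &hu can be handed to h4_recv_buf() */
};

static const struct h4_recv_pkt demo_recv_pkts[] = {
	{ H4_RECV_ACL,   .recv = hci_recv_frame },
	{ H4_RECV_SCO,   .recv = hci_recv_frame },
	{ H4_RECV_EVENT, .recv = hci_recv_frame },
};

static void demo_receive(struct demo_dev *bdev, const u8 *data, int count)
{
	/* h4_recv_buf() now derives hdev and alignment from the hci_uart */
	bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, data, count,
				   demo_recv_pkts, ARRAY_SIZE(demo_recv_pkts));
	if (IS_ERR(bdev->rx_skb)) {
		bt_dev_err(bdev->hdev, "Frame reassembly failed");
		bdev->rx_skb = NULL;
	}
}

static void demo_bind(struct demo_dev *bdev, struct hci_dev *hdev)
{
	bdev->hdev = hdev;
	bdev->hu.hdev = hdev;	/* must be set before the first demo_receive() */
}
```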
The OHCI1394 spec @@ -189,6 +190,8 @@ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci) /** * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging + * @ohci: Pointer to the OHCI-1394 controller structure + * * This enables remote DMA access over IEEE1394 from every host for the low * 4GB of address space. DMA accesses above 4GB are not available currently. */ @@ -201,6 +204,8 @@ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci) /** * init_ohci1394_reset_and_init_dma - init controller and enable DMA + * @ohci: Pointer to the OHCI-1394 controller structure + * * This initializes the given controller and enables physical DMA engine in it. */ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci) @@ -230,6 +235,10 @@ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci) /** * init_ohci1394_controller - Map the registers of the controller and init DMA + * @num: PCI bus number + * @slot: PCI device number + * @func: PCI function number + * * This maps the registers of the specified controller and initializes it */ static inline void __init init_ohci1394_controller(int num, int slot, int func) @@ -284,6 +293,7 @@ void __init init_ohci1394_dma_on_all_controllers(void) /** * setup_ohci1394_dma - enables early OHCI1394 DMA initialization + * @opt: Kernel command line parameter string */ static int __init setup_ohci1394_dma(char *opt) { diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 65bf1685350a..c72ee4756585 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -649,6 +649,26 @@ static u16 ffa_memory_attributes_get(u32 func_id) return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE; } +static void ffa_emad_impdef_value_init(u32 version, void *dst, void *src) +{ + struct ffa_mem_region_attributes *ep_mem_access; + + if (FFA_EMAD_HAS_IMPDEF_FIELD(version)) + memcpy(dst, src, sizeof(ep_mem_access->impdef_val)); +} + +static void +ffa_mem_region_additional_setup(u32 version, struct ffa_mem_region *mem_region) +{ + if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version)) { + mem_region->ep_mem_size = 0; + } else { + mem_region->ep_mem_size = ffa_emad_size_get(version); + mem_region->ep_mem_offset = sizeof(*mem_region); + memset(mem_region->reserved, 0, 12); + } +} + static int ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, struct ffa_mem_ops_args *args) @@ -667,27 +687,24 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize, mem_region->flags = args->flags; mem_region->sender_id = drv_info->vm_id; mem_region->attributes = ffa_memory_attributes_get(func_id); - ep_mem_access = buffer + - ffa_mem_desc_offset(buffer, 0, drv_info->version); composite_offset = ffa_mem_desc_offset(buffer, args->nattrs, drv_info->version); - for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) { + for (idx = 0; idx < args->nattrs; idx++) { + ep_mem_access = buffer + + ffa_mem_desc_offset(buffer, idx, drv_info->version); ep_mem_access->receiver = args->attrs[idx].receiver; ep_mem_access->attrs = args->attrs[idx].attrs; ep_mem_access->composite_off = composite_offset; ep_mem_access->flag = 0; ep_mem_access->reserved = 0; + ffa_emad_impdef_value_init(drv_info->version, + ep_mem_access->impdef_val, + args->attrs[idx].impdef_val); } mem_region->handle = 0; mem_region->ep_count = args->nattrs; - if (drv_info->version <= FFA_VERSION_1_0) { - mem_region->ep_mem_size = 0; - } else { - 
mem_region->ep_mem_size = sizeof(*ep_mem_access); - mem_region->ep_mem_offset = sizeof(*mem_region); - memset(mem_region->reserved, 0, 12); - } + ffa_mem_region_additional_setup(drv_info->version, mem_region); composite = buffer + composite_offset; composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg); diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 07b9e629276d..7c35c95fddba 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -309,16 +309,36 @@ enum debug_counters { SCMI_DEBUG_COUNTERS_LAST }; -static inline void scmi_inc_count(atomic_t *arr, int stat) +/** + * struct scmi_debug_info - Debug common info + * @top_dentry: A reference to the top debugfs dentry + * @name: Name of this SCMI instance + * @type: Type of this SCMI instance + * @is_atomic: Flag to state if the transport of this instance is atomic + * @counters: An array of atomic_t's used for tracking statistics (if enabled) + */ +struct scmi_debug_info { + struct dentry *top_dentry; + const char *name; + const char *type; + bool is_atomic; + atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; +}; + +static inline void scmi_inc_count(struct scmi_debug_info *dbg, int stat) { - if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) - atomic_inc(&arr[stat]); + if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) { + if (dbg) + atomic_inc(&dbg->counters[stat]); + } } -static inline void scmi_dec_count(atomic_t *arr, int stat) +static inline void scmi_dec_count(struct scmi_debug_info *dbg, int stat) { - if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) - atomic_dec(&arr[stat]); + if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) { + if (dbg) + atomic_dec(&dbg->counters[stat]); + } } enum scmi_bad_msg { diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index bd56a877fdfc..5caa9191a8d1 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -115,22 +115,6 @@ struct scmi_protocol_instance { #define ph_to_pi(h) container_of(h, struct scmi_protocol_instance, ph) -/** - * struct scmi_debug_info - Debug common info - * @top_dentry: A reference to the top debugfs dentry - * @name: Name of this SCMI instance - * @type: Type of this SCMI instance - * @is_atomic: Flag to state if the transport of this instance is atomic - * @counters: An array of atomic_c's used for tracking statistics (if enabled) - */ -struct scmi_debug_info { - struct dentry *top_dentry; - const char *name; - const char *type; - bool is_atomic; - atomic_t counters[SCMI_DEBUG_COUNTERS_LAST]; -}; - /** * struct scmi_info - Structure representing a SCMI instance * @@ -610,7 +594,7 @@ scmi_xfer_inflight_register_unlocked(struct scmi_xfer *xfer, /* Set in-flight */ set_bit(xfer->hdr.seq, minfo->xfer_alloc_table); hash_add(minfo->pending_xfers, &xfer->node, xfer->hdr.seq); - scmi_inc_count(info->dbg->counters, XFERS_INFLIGHT); + scmi_inc_count(info->dbg, XFERS_INFLIGHT); xfer->pending = true; } @@ -819,8 +803,9 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer) hash_del(&xfer->node); xfer->pending = false; - scmi_dec_count(info->dbg->counters, XFERS_INFLIGHT); + scmi_dec_count(info->dbg, XFERS_INFLIGHT); } + xfer->flags = 0; hlist_add_head(&xfer->node, &minfo->free_xfers); } spin_unlock_irqrestore(&minfo->xfer_lock, flags); @@ -839,8 +824,6 @@ void scmi_xfer_raw_put(const struct scmi_handle *handle, struct scmi_xfer *xfer) { struct scmi_info *info = handle_to_scmi_info(handle); - xfer->flags &= ~SCMI_XFER_FLAG_IS_RAW; -
xfer->flags &= ~SCMI_XFER_FLAG_CHAN_SET; return __scmi_xfer_put(&info->tx_minfo, xfer); } @@ -1034,7 +1017,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) spin_unlock_irqrestore(&minfo->xfer_lock, flags); scmi_bad_message_trace(cinfo, msg_hdr, MSG_UNEXPECTED); - scmi_inc_count(info->dbg->counters, ERR_MSG_UNEXPECTED); + scmi_inc_count(info->dbg, ERR_MSG_UNEXPECTED); return xfer; } @@ -1062,7 +1045,7 @@ scmi_xfer_command_acquire(struct scmi_chan_info *cinfo, u32 msg_hdr) msg_type, xfer_id, msg_hdr, xfer->state); scmi_bad_message_trace(cinfo, msg_hdr, MSG_INVALID); - scmi_inc_count(info->dbg->counters, ERR_MSG_INVALID); + scmi_inc_count(info->dbg, ERR_MSG_INVALID); /* On error the refcount incremented above has to be dropped */ __scmi_xfer_put(minfo, xfer); @@ -1107,7 +1090,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, PTR_ERR(xfer)); scmi_bad_message_trace(cinfo, msg_hdr, MSG_NOMEM); - scmi_inc_count(info->dbg->counters, ERR_MSG_NOMEM); + scmi_inc_count(info->dbg, ERR_MSG_NOMEM); scmi_clear_channel(info, cinfo); return; @@ -1123,7 +1106,7 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); - scmi_inc_count(info->dbg->counters, NOTIFICATION_OK); + scmi_inc_count(info->dbg, NOTIFICATION_OK); scmi_notify(cinfo->handle, xfer->hdr.protocol_id, xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); @@ -1183,10 +1166,10 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, if (xfer->hdr.type == MSG_TYPE_DELAYED_RESP) { scmi_clear_channel(info, cinfo); complete(xfer->async_done); - scmi_inc_count(info->dbg->counters, DELAYED_RESPONSE_OK); + scmi_inc_count(info->dbg, DELAYED_RESPONSE_OK); } else { complete(&xfer->done); - scmi_inc_count(info->dbg->counters, RESPONSE_OK); + scmi_inc_count(info->dbg, RESPONSE_OK); } if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { @@ -1296,7 +1279,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, "timed out in resp(caller: %pS) - polling\n", (void *)_RET_IP_); ret = -ETIMEDOUT; - scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_POLLED_TIMEOUT); + scmi_inc_count(info->dbg, XFERS_RESPONSE_POLLED_TIMEOUT); } } @@ -1321,7 +1304,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, "RESP" : "resp", xfer->hdr.seq, xfer->hdr.status, xfer->rx.buf, xfer->rx.len); - scmi_inc_count(info->dbg->counters, RESPONSE_POLLED_OK); + scmi_inc_count(info->dbg, RESPONSE_POLLED_OK); if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { scmi_raw_message_report(info->raw, xfer, @@ -1336,7 +1319,7 @@ static int scmi_wait_for_reply(struct device *dev, const struct scmi_desc *desc, dev_err(dev, "timed out in resp(caller: %pS)\n", (void *)_RET_IP_); ret = -ETIMEDOUT; - scmi_inc_count(info->dbg->counters, XFERS_RESPONSE_TIMEOUT); + scmi_inc_count(info->dbg, XFERS_RESPONSE_TIMEOUT); } } @@ -1420,13 +1403,13 @@ static int do_xfer(const struct scmi_protocol_handle *ph, !is_transport_polling_capable(info->desc)) { dev_warn_once(dev, "Polling mode is not supported by transport.\n"); - scmi_inc_count(info->dbg->counters, SENT_FAIL_POLLING_UNSUPPORTED); + scmi_inc_count(info->dbg, SENT_FAIL_POLLING_UNSUPPORTED); return -EINVAL; } cinfo = idr_find(&info->tx_idr, pi->proto->id); if (unlikely(!cinfo)) { - scmi_inc_count(info->dbg->counters, SENT_FAIL_CHANNEL_NOT_FOUND); + scmi_inc_count(info->dbg, 
SENT_FAIL_CHANNEL_NOT_FOUND); return -EINVAL; } /* True ONLY if also supported by transport. */ @@ -1461,19 +1444,19 @@ static int do_xfer(const struct scmi_protocol_handle *ph, ret = info->desc->ops->send_message(cinfo, xfer); if (ret < 0) { dev_dbg(dev, "Failed to send message %d\n", ret); - scmi_inc_count(info->dbg->counters, SENT_FAIL); + scmi_inc_count(info->dbg, SENT_FAIL); return ret; } trace_scmi_msg_dump(info->id, cinfo->id, xfer->hdr.protocol_id, xfer->hdr.id, "CMND", xfer->hdr.seq, xfer->hdr.status, xfer->tx.buf, xfer->tx.len); - scmi_inc_count(info->dbg->counters, SENT_OK); + scmi_inc_count(info->dbg, SENT_OK); ret = scmi_wait_for_message_response(cinfo, xfer); if (!ret && xfer->hdr.status) { ret = scmi_to_linux_errno(xfer->hdr.status); - scmi_inc_count(info->dbg->counters, ERR_PROTOCOL); + scmi_inc_count(info->dbg, ERR_PROTOCOL); } if (info->desc->ops->mark_txdone) @@ -3044,9 +3027,6 @@ static int scmi_debugfs_raw_mode_setup(struct scmi_info *info) u8 channels[SCMI_MAX_CHANNELS] = {}; DECLARE_BITMAP(protos, SCMI_MAX_CHANNELS) = {}; - if (!info->dbg) - return -EINVAL; - /* Enumerate all channels to collect their ids */ idr_for_each_entry(&info->tx_idr, cinfo, id) { /* @@ -3218,7 +3198,7 @@ static int scmi_probe(struct platform_device *pdev) if (!info->dbg) dev_warn(dev, "Failed to setup SCMI debugfs.\n"); - if (IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { + if (info->dbg && IS_ENABLED(CONFIG_ARM_SCMI_RAW_MODE_SUPPORT)) { ret = scmi_debugfs_raw_mode_setup(info); if (!coex) { if (ret) @@ -3423,6 +3403,9 @@ int scmi_inflight_count(const struct scmi_handle *handle) if (IS_ENABLED(CONFIG_ARM_SCMI_DEBUG_COUNTERS)) { struct scmi_info *info = handle_to_scmi_info(handle); + if (!info->dbg) + return 0; + return atomic_read(&info->dbg->counters[XFERS_INFLIGHT]); } else { return 0; diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c index ffe7e1cb6b23..fe5c10cd5c32 100644 --- a/drivers/gpio/gpio-104-idio-16.c +++ b/drivers/gpio/gpio-104-idio-16.c @@ -59,6 +59,7 @@ static const struct regmap_config idio_16_regmap_config = { .reg_stride = 1, .val_bits = 8, .io_port = true, + .max_register = 0x5, .wr_table = &idio_16_wr_table, .rd_table = &idio_16_rd_table, .volatile_table = &idio_16_rd_table, diff --git a/drivers/gpio/gpio-idio-16.c b/drivers/gpio/gpio-idio-16.c index 0103be977c66..4fbae6f6a497 100644 --- a/drivers/gpio/gpio-idio-16.c +++ b/drivers/gpio/gpio-idio-16.c @@ -6,6 +6,7 @@ #define DEFAULT_SYMBOL_NAMESPACE "GPIO_IDIO_16" +#include #include #include #include @@ -107,6 +108,7 @@ int devm_idio_16_regmap_register(struct device *const dev, struct idio_16_data *data; struct regmap_irq_chip *chip; struct regmap_irq_chip_data *chip_data; + DECLARE_BITMAP(fixed_direction_output, IDIO_16_NGPIO); if (!config->parent) return -EINVAL; @@ -164,6 +166,9 @@ int devm_idio_16_regmap_register(struct device *const dev, gpio_config.irq_domain = regmap_irq_get_domain(chip_data); gpio_config.reg_mask_xlate = idio_16_reg_mask_xlate; + bitmap_from_u64(fixed_direction_output, GENMASK_U64(15, 0)); + gpio_config.fixed_direction_output = fixed_direction_output; + return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config)); } EXPORT_SYMBOL_GPL(devm_idio_16_regmap_register); diff --git a/drivers/gpio/gpio-ljca.c b/drivers/gpio/gpio-ljca.c index 3b4f8830c741..f32d1d237795 100644 --- a/drivers/gpio/gpio-ljca.c +++ b/drivers/gpio/gpio-ljca.c @@ -286,22 +286,14 @@ static void ljca_gpio_event_cb(void *context, u8 cmd, const void *evt_data, { const struct ljca_gpio_packet *packet 
= evt_data; struct ljca_gpio_dev *ljca_gpio = context; - int i, irq; + int i; if (cmd != LJCA_GPIO_INT_EVENT) return; for (i = 0; i < packet->num; i++) { - irq = irq_find_mapping(ljca_gpio->gc.irq.domain, - packet->item[i].index); - if (!irq) { - dev_err(ljca_gpio->gc.parent, - "gpio_id %u does not mapped to IRQ yet\n", - packet->item[i].index); - return; - } - - generic_handle_domain_irq(ljca_gpio->gc.irq.domain, irq); + generic_handle_domain_irq(ljca_gpio->gc.irq.domain, + packet->item[i].index); set_bit(packet->item[i].index, ljca_gpio->reenable_irqs); } diff --git a/drivers/gpio/gpio-pci-idio-16.c b/drivers/gpio/gpio-pci-idio-16.c index 476cea1b5ed7..9d28ca8e1d6f 100644 --- a/drivers/gpio/gpio-pci-idio-16.c +++ b/drivers/gpio/gpio-pci-idio-16.c @@ -41,6 +41,7 @@ static const struct regmap_config idio_16_regmap_config = { .reg_stride = 1, .val_bits = 8, .io_port = true, + .max_register = 0x7, .wr_table = &idio_16_wr_table, .rd_table = &idio_16_rd_table, .volatile_table = &idio_16_rd_table, diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c index ab9e4077fa60..f4267af00027 100644 --- a/drivers/gpio/gpio-regmap.c +++ b/drivers/gpio/gpio-regmap.c @@ -31,6 +31,7 @@ struct gpio_regmap { unsigned int reg_clr_base; unsigned int reg_dir_in_base; unsigned int reg_dir_out_base; + unsigned long *fixed_direction_output; #ifdef CONFIG_REGMAP_IRQ int regmap_irq_line; @@ -134,6 +135,13 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip, unsigned int base, val, reg, mask; int invert, ret; + if (gpio->fixed_direction_output) { + if (test_bit(offset, gpio->fixed_direction_output)) + return GPIO_LINE_DIRECTION_OUT; + else + return GPIO_LINE_DIRECTION_IN; + } + if (gpio->reg_dat_base && !gpio->reg_set_base) return GPIO_LINE_DIRECTION_IN; if (gpio->reg_set_base && !gpio->reg_dat_base) @@ -284,6 +292,17 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config goto err_free_gpio; } + if (config->fixed_direction_output) { + gpio->fixed_direction_output = bitmap_alloc(chip->ngpio, + GFP_KERNEL); + if (!gpio->fixed_direction_output) { + ret = -ENOMEM; + goto err_free_gpio; + } + bitmap_copy(gpio->fixed_direction_output, + config->fixed_direction_output, chip->ngpio); + } + /* if not set, assume there is only one register */ gpio->ngpio_per_reg = config->ngpio_per_reg; if (!gpio->ngpio_per_reg) @@ -300,7 +319,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config ret = gpiochip_add_data(chip, gpio); if (ret < 0) - goto err_free_gpio; + goto err_free_bitmap; #ifdef CONFIG_REGMAP_IRQ if (config->regmap_irq_chip) { @@ -309,7 +328,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config config->regmap_irq_line, config->regmap_irq_flags, 0, config->regmap_irq_chip, &gpio->irq_chip_data); if (ret) - goto err_free_gpio; + goto err_free_bitmap; irq_domain = regmap_irq_get_domain(gpio->irq_chip_data); } else @@ -326,6 +345,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config err_remove_gpiochip: gpiochip_remove(chip); +err_free_bitmap: + bitmap_free(gpio->fixed_direction_output); err_free_gpio: kfree(gpio); return ERR_PTR(ret); @@ -344,6 +365,7 @@ void gpio_regmap_unregister(struct gpio_regmap *gpio) #endif gpiochip_remove(&gpio->gpio_chip); + bitmap_free(gpio->fixed_direction_output); kfree(gpio); } EXPORT_SYMBOL_GPL(gpio_regmap_unregister); diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c index 284e762d92c4..d441c1236d8c 100644 --- 
a/drivers/gpio/gpiolib-acpi-core.c +++ b/drivers/gpio/gpiolib-acpi-core.c @@ -291,6 +291,19 @@ acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity) return GPIOD_ASIS; } +static void acpi_gpio_set_debounce_timeout(struct gpio_desc *desc, + unsigned int acpi_debounce) +{ + int ret; + + /* ACPI uses hundredths of milliseconds units */ + acpi_debounce *= 10; + ret = gpio_set_debounce_timeout(desc, acpi_debounce); + if (ret) + gpiod_warn(desc, "Failed to set debounce-timeout %u: %d\n", + acpi_debounce, ret); +} + static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip, struct acpi_resource_gpio *agpio, unsigned int index, @@ -300,18 +313,12 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip, enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity); unsigned int pin = agpio->pin_table[index]; struct gpio_desc *desc; - int ret; desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags); if (IS_ERR(desc)) return desc; - /* ACPI uses hundredths of milliseconds units */ - ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout * 10); - if (ret) - dev_warn(chip->parent, - "Failed to set debounce-timeout for pin 0x%04X, err %d\n", - pin, ret); + acpi_gpio_set_debounce_timeout(desc, agpio->debounce_timeout); return desc; } @@ -375,8 +382,8 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares, desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event"); if (IS_ERR(desc)) { dev_err(chip->parent, - "Failed to request GPIO for pin 0x%04X, err %ld\n", - pin, PTR_ERR(desc)); + "Failed to request GPIO for pin 0x%04X, err %pe\n", + pin, desc); return AE_OK; } @@ -944,7 +951,6 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, bool can_fallback = acpi_can_fallback_to_crs(adev, con_id); struct acpi_gpio_info info = {}; struct gpio_desc *desc; - int ret; desc = __acpi_find_gpio(fwnode, con_id, idx, can_fallback, &info); if (IS_ERR(desc)) @@ -959,10 +965,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, acpi_gpio_update_gpiod_flags(dflags, &info); acpi_gpio_update_gpiod_lookup_flags(lookupflags, &info); - /* ACPI uses hundredths of milliseconds units */ - ret = gpio_set_debounce_timeout(desc, info.debounce * 10); - if (ret) - return ERR_PTR(ret); + acpi_gpio_set_debounce_timeout(desc, info.debounce); return desc; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6597475e245d..bfa3199591b6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -551,13 +551,13 @@ static void schedule_dc_vmin_vmax(struct amdgpu_device *adev, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust) { - struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_KERNEL); + struct vupdate_offload_work *offload_work = kzalloc(sizeof(*offload_work), GFP_NOWAIT); if (!offload_work) { drm_dbg_driver(adev_to_drm(adev), "Failed to allocate vupdate_offload_work\n"); return; } - struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_KERNEL); + struct dc_crtc_timing_adjust *adjust_copy = kzalloc(sizeof(*adjust_copy), GFP_NOWAIT); if (!adjust_copy) { drm_dbg_driver(adev_to_drm(adev), "Failed to allocate adjust_copy\n"); kfree(offload_work); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index 7c276c319086..ce3d0b45fb4c 100644 --- 
a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -200,6 +200,9 @@ void dcn401_init_hw(struct dc *dc) */ struct dc_link *link = dc->links[i]; + if (link->ep_type != DISPLAY_ENDPOINT_PHY) + continue; + link->link_enc->funcs->hw_init(link->link_enc); /* Check for enabled DIG to identify enabled display */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 41c76ba9ba56..62a39204fe0b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -44,7 +44,13 @@ */ #define MAX_PIPES 6 #define MAX_PHANTOM_PIPES (MAX_PIPES / 2) -#define MAX_LINKS (MAX_PIPES * 2 +2) + +#define MAX_DPIA 6 +#define MAX_CONNECTOR 6 +#define MAX_VIRTUAL_LINKS 4 + +#define MAX_LINKS (MAX_DPIA + MAX_CONNECTOR + MAX_VIRTUAL_LINKS) + #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 #define MAX_HPO_DP2_ENCODERS 4 diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 9e33bf937a69..2676ae9f6fe8 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -78,6 +78,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link, struct audio_output audio_output[MAX_PIPES]; struct dc_stream_state *streams_on_link[MAX_PIPES]; int num_streams_on_link = 0; + struct dc *dc = (struct dc *)link->dc; needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) != link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings)); @@ -150,7 +151,7 @@ static void dp_retrain_link_dp_test(struct dc_link *link, if (streams_on_link[i] && streams_on_link[i]->link && streams_on_link[i]->link == link) { stream_update.stream = streams_on_link[i]; stream_update.dpms_off = &dpms_off; - dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, streams_on_link[i], &stream_update); + dc_update_planes_and_stream(dc, NULL, 0, streams_on_link[i], &stream_update); } } } diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c index 1d6312fa1429..d4b6ea42db0f 100644 --- a/drivers/gpu/drm/drm_panic.c +++ b/drivers/gpu/drm/drm_panic.c @@ -174,6 +174,33 @@ static void drm_panic_write_pixel24(void *vaddr, unsigned int offset, u32 color) *p = color & 0xff; } +/* + * Special case if the pixel crosses page boundaries + */ +static void drm_panic_write_pixel24_xpage(void *vaddr, struct page *next_page, + unsigned int offset, u32 color) +{ + u8 *vaddr2; + u8 *p = vaddr + offset; + + vaddr2 = kmap_local_page_try_from_panic(next_page); + + *p++ = color & 0xff; + color >>= 8; + + if (offset == PAGE_SIZE - 1) + p = vaddr2; + + *p++ = color & 0xff; + color >>= 8; + + if (offset == PAGE_SIZE - 2) + p = vaddr2; + + *p = color & 0xff; + kunmap_local(vaddr2); +} + static void drm_panic_write_pixel32(void *vaddr, unsigned int offset, u32 color) { u32 *p = vaddr + offset; @@ -231,7 +258,14 @@ static void drm_panic_blit_page(struct page **pages, unsigned int dpitch, page = new_page; vaddr = kmap_local_page_try_from_panic(pages[page]); } - if (vaddr) + if (!vaddr) + continue; + + // Special case for 24bit, as a pixel might cross page boundaries + if (cpp == 3 && offset + 3 > PAGE_SIZE) + drm_panic_write_pixel24_xpage(vaddr, pages[page + 1], + offset, fg32); + else drm_panic_write_pixel(vaddr, offset, fg32, cpp); } } @@ 
-321,7 +355,15 @@ static void drm_panic_fill_page(struct page **pages, unsigned int dpitch, page = new_page; vaddr = kmap_local_page_try_from_panic(pages[page]); } - drm_panic_write_pixel(vaddr, offset, color, cpp); + if (!vaddr) + continue; + + // Special case for 24bit, as a pixel might cross page boundaries + if (cpp == 3 && offset + 3 > PAGE_SIZE) + drm_panic_write_pixel24_xpage(vaddr, pages[page + 1], + offset, color); + else + drm_panic_write_pixel(vaddr, offset, color, cpp); } } if (vaddr) @@ -429,6 +471,9 @@ static void drm_panic_logo_rect(struct drm_rect *rect, const struct font_desc *f static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *rect, const struct font_desc *font, u32 fg_color) { + if (rect->x2 > sb->width || rect->y2 > sb->height) + return; + if (logo_mono) drm_panic_blit(sb, rect, logo_mono->data, DIV_ROUND_UP(drm_rect_width(rect), 8), 1, fg_color); @@ -477,7 +522,7 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_ struct drm_panic_line *line, int yoffset, u32 fg_color) { int chars_per_row = sb->width / font->width; - struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, sb->height); + struct drm_rect r_txt = DRM_RECT_INIT(0, yoffset, sb->width, font->height); struct drm_panic_line line_wrap; if (line->len > chars_per_row) { @@ -520,7 +565,7 @@ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb) struct drm_panic_line line; int yoffset; - if (!font) + if (!font || font->width > sb->width) return; yoffset = sb->height - font->height - (sb->height % font->height) / 2; @@ -733,7 +778,10 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb) pr_debug("QR width %d and scale %d\n", qr_width, scale); r_qr_canvas = DRM_RECT_INIT(0, 0, qr_canvas_width * scale, qr_canvas_width * scale); - v_margin = (sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg)) / 5; + v_margin = sb->height - drm_rect_height(&r_qr_canvas) - drm_rect_height(&r_msg); + if (v_margin < 0) + return -ENOSPC; + v_margin /= 5; drm_rect_translate(&r_qr_canvas, (sb->width - r_qr_canvas.x2) / 2, 2 * v_margin); r_qr = DRM_RECT_INIT(r_qr_canvas.x1 + QR_MARGIN * scale, r_qr_canvas.y1 + QR_MARGIN * scale, @@ -746,7 +794,7 @@ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb) /* Fill with the background color, and draw text on top */ drm_panic_fill(sb, &r_screen, bg_color); - if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr)) + if (!drm_rect_overlap(&r_logo, &r_msg) && !drm_rect_overlap(&r_logo, &r_qr_canvas)) drm_panic_logo_draw(sb, &r_logo, font, fg_color); draw_txt_rectangle(sb, font, panic_msg, panic_msg_lines, true, &r_msg, fg_color); diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index b817ff44c043..c48384e58ea1 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -2117,6 +2117,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) intel_frontbuffer_put(intel_fb->frontbuffer); + kfree(intel_fb->panic); kfree(intel_fb); } @@ -2215,16 +2216,22 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, struct intel_display *display = to_intel_display(obj->dev); struct drm_framebuffer *fb = &intel_fb->base; u32 max_stride; - int ret = -EINVAL; + int ret; int i; + intel_fb->panic = intel_panic_alloc(); + if (!intel_fb->panic) + return -ENOMEM; + /* * intel_frontbuffer_get() must be done before * intel_fb_bo_framebuffer_init() to avoid 
set_tiling vs. addfb race. */ intel_fb->frontbuffer = intel_frontbuffer_get(obj); - if (!intel_fb->frontbuffer) - return -ENOMEM; + if (!intel_fb->frontbuffer) { + ret = -ENOMEM; + goto err_free_panic; + } ret = intel_fb_bo_framebuffer_init(fb, obj, mode_cmd); if (ret) @@ -2323,6 +2330,9 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, intel_fb_bo_framebuffer_fini(obj); err_frontbuffer_put: intel_frontbuffer_put(intel_fb->frontbuffer); +err_free_panic: + kfree(intel_fb->panic); + return ret; } @@ -2349,20 +2359,11 @@ intel_user_framebuffer_create(struct drm_device *dev, struct intel_framebuffer *intel_framebuffer_alloc(void) { struct intel_framebuffer *intel_fb; - struct intel_panic *panic; intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) return NULL; - panic = intel_panic_alloc(); - if (!panic) { - kfree(intel_fb); - return NULL; - } - - intel_fb->panic = panic; - return intel_fb; } diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 6dec4354e378..7870e7dbaa5d 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -1175,10 +1175,14 @@ panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx) break; case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: - /* Partial unmaps might trigger a remap with either a prev or a next VA, - * but not both. + /* Two VMAs can be needed for an unmap, as an unmap can happen + * in the middle of a drm_gpuva, requiring a remap with both + * prev & next VA. Or an unmap can span more than one drm_gpuva + * where the first and last ones are covered partially, requiring + * a remap for the first with a prev VA and a remap for the last + * with a next VA. */ - vma_count = 1; + vma_count = 2; break; default: diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 7b613997bb50..727cdf768161 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -361,7 +361,7 @@ static void dw_hdmi_rk3228_setup_hpd(struct dw_hdmi *dw_hdmi, void *data) regmap_write(hdmi->regmap, RK3228_GRF_SOC_CON2, FIELD_PREP_WM16(RK3228_HDMI_SDAIN_MSK, 1) | - FIELD_PREP_WM16(RK3328_HDMI_SCLIN_MSK, 1)); + FIELD_PREP_WM16(RK3228_HDMI_SCLIN_MSK, 1)); } static enum drm_connector_status diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 7fdd0a97a628..5edc0cad47e2 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -292,6 +292,9 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) ggtt->pt_ops = &xelp_pt_ops; ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM); + if (!ggtt->wq) + return -ENOMEM; + __xe_ggtt_init_early(ggtt, xe_wopcm_size(xe)); err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt); diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c index a1c88f9a6c76..07f96bda638a 100644 --- a/drivers/gpu/drm/xe/xe_pt.c +++ b/drivers/gpu/drm/xe/xe_pt.c @@ -2022,7 +2022,7 @@ static int op_prepare(struct xe_vm *vm, case DRM_GPUVA_OP_MAP: if ((!op->map.immediate && xe_vm_in_fault_mode(vm) && !op->map.invalidate_on_bind) || - op->map.is_cpu_addr_mirror) + (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) break; err = bind_op_prepare(vm, tile, pt_update_ops, op->map.vma, @@ -2252,7 +2252,7 @@ static void op_commit(struct xe_vm *vm, switch (op->base.op) { case DRM_GPUVA_OP_MAP: if ((!op->map.immediate && xe_vm_in_fault_mode(vm)) || - op->map.is_cpu_addr_mirror) + (op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR))
break; bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence, diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index da2a412f80c0..129e7818565c 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -302,6 +302,11 @@ static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 range_start, u64 if (!vma) return -EINVAL; + if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) { + drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n"); + return 0; + } + if (xe_vma_has_default_mem_attrs(vma)) return 0; diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index f602b874e054..63c65e3d207b 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -616,6 +616,13 @@ static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask, vops->pt_update_ops[i].num_ops += inc_val; } +#define XE_VMA_CREATE_MASK ( \ + XE_VMA_READ_ONLY | \ + XE_VMA_DUMPABLE | \ + XE_VMA_SYSTEM_ALLOCATOR | \ + DRM_GPUVA_SPARSE | \ + XE_VMA_MADV_AUTORESET) + static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma, u8 tile_mask) { @@ -628,8 +635,7 @@ static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma, op->base.map.gem.offset = vma->gpuva.gem.offset; op->map.vma = vma; op->map.immediate = true; - op->map.dumpable = vma->gpuva.flags & XE_VMA_DUMPABLE; - op->map.is_null = xe_vma_is_null(vma); + op->map.vma_flags = vma->gpuva.flags & XE_VMA_CREATE_MASK; } static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma, @@ -932,11 +938,6 @@ static void xe_vma_free(struct xe_vma *vma) kfree(vma); } -#define VMA_CREATE_FLAG_READ_ONLY BIT(0) -#define VMA_CREATE_FLAG_IS_NULL BIT(1) -#define VMA_CREATE_FLAG_DUMPABLE BIT(2) -#define VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR BIT(3) - static struct xe_vma *xe_vma_create(struct xe_vm *vm, struct xe_bo *bo, u64 bo_offset_or_userptr, @@ -947,11 +948,8 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, struct xe_vma *vma; struct xe_tile *tile; u8 id; - bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY); - bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL); - bool dumpable = (flags & VMA_CREATE_FLAG_DUMPABLE); - bool is_cpu_addr_mirror = - (flags & VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR); + bool is_null = (flags & DRM_GPUVA_SPARSE); + bool is_cpu_addr_mirror = (flags & XE_VMA_SYSTEM_ALLOCATOR); xe_assert(vm->xe, start < end); xe_assert(vm->xe, end < vm->size); @@ -972,10 +970,6 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, if (!vma) return ERR_PTR(-ENOMEM); - if (is_cpu_addr_mirror) - vma->gpuva.flags |= XE_VMA_SYSTEM_ALLOCATOR; - if (is_null) - vma->gpuva.flags |= DRM_GPUVA_SPARSE; if (bo) vma->gpuva.gem.obj = &bo->ttm.base; } @@ -986,10 +980,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm, vma->gpuva.vm = &vm->gpuvm; vma->gpuva.va.addr = start; vma->gpuva.va.range = end - start + 1; - if (read_only) - vma->gpuva.flags |= XE_VMA_READ_ONLY; - if (dumpable) - vma->gpuva.flags |= XE_VMA_DUMPABLE; + vma->gpuva.flags = flags; for_each_tile(tile, vm->xe, id) vma->tile_mask |= 0x1 << id; @@ -2272,12 +2263,16 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops, if (__op->op == DRM_GPUVA_OP_MAP) { op->map.immediate = flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE; - op->map.read_only = - flags & DRM_XE_VM_BIND_FLAG_READONLY; - op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL; - op->map.is_cpu_addr_mirror = flags & - DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR; - op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE; + if (flags & 
DRM_XE_VM_BIND_FLAG_READONLY) + op->map.vma_flags |= XE_VMA_READ_ONLY; + if (flags & DRM_XE_VM_BIND_FLAG_NULL) + op->map.vma_flags |= DRM_GPUVA_SPARSE; + if (flags & DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR) + op->map.vma_flags |= XE_VMA_SYSTEM_ALLOCATOR; + if (flags & DRM_XE_VM_BIND_FLAG_DUMPABLE) + op->map.vma_flags |= XE_VMA_DUMPABLE; + if (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) + op->map.vma_flags |= XE_VMA_MADV_AUTORESET; op->map.pat_index = pat_index; op->map.invalidate_on_bind = __xe_vm_needs_clear_scratch_pages(vm, flags); @@ -2590,14 +2585,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, .pat_index = op->map.pat_index, }; - flags |= op->map.read_only ? - VMA_CREATE_FLAG_READ_ONLY : 0; - flags |= op->map.is_null ? - VMA_CREATE_FLAG_IS_NULL : 0; - flags |= op->map.dumpable ? - VMA_CREATE_FLAG_DUMPABLE : 0; - flags |= op->map.is_cpu_addr_mirror ? - VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0; + flags |= op->map.vma_flags & XE_VMA_CREATE_MASK; vma = new_vma(vm, &op->base.map, &default_attr, flags); @@ -2606,7 +2594,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, op->map.vma = vma; if (((op->map.immediate || !xe_vm_in_fault_mode(vm)) && - !op->map.is_cpu_addr_mirror) || + !(op->map.vma_flags & XE_VMA_SYSTEM_ALLOCATOR)) || op->map.invalidate_on_bind) xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1); @@ -2637,18 +2625,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops, op->remap.start = xe_vma_start(old); op->remap.range = xe_vma_size(old); - flags |= op->base.remap.unmap->va->flags & - XE_VMA_READ_ONLY ? - VMA_CREATE_FLAG_READ_ONLY : 0; - flags |= op->base.remap.unmap->va->flags & - DRM_GPUVA_SPARSE ? - VMA_CREATE_FLAG_IS_NULL : 0; - flags |= op->base.remap.unmap->va->flags & - XE_VMA_DUMPABLE ? - VMA_CREATE_FLAG_DUMPABLE : 0; - flags |= xe_vma_is_cpu_addr_mirror(old) ? 
- VMA_CREATE_FLAG_IS_SYSTEM_ALLOCATOR : 0; - + flags |= op->base.remap.unmap->va->flags & XE_VMA_CREATE_MASK; if (op->base.remap.prev) { vma = new_vma(vm, op->base.remap.prev, &old->attr, flags); @@ -3279,7 +3256,8 @@ ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_execute, ERRNO); DRM_XE_VM_BIND_FLAG_NULL | \ DRM_XE_VM_BIND_FLAG_DUMPABLE | \ DRM_XE_VM_BIND_FLAG_CHECK_PXP | \ - DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR) + DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR | \ + DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) #ifdef TEST_VM_OPS_ERROR #define SUPPORTED_FLAGS (SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR) @@ -3394,7 +3372,9 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, XE_IOCTL_DBG(xe, (prefetch_region != DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC && !(BIT(prefetch_region) & xe->info.mem_region_mask))) || XE_IOCTL_DBG(xe, obj && - op == DRM_XE_VM_BIND_OP_UNMAP)) { + op == DRM_XE_VM_BIND_OP_UNMAP) || + XE_IOCTL_DBG(xe, (flags & DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET) && + (!is_cpu_addr_mirror || op != DRM_XE_VM_BIND_OP_MAP))) { err = -EINVAL; goto free_bind_ops; } @@ -4212,7 +4192,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm, struct xe_vma_ops vops; struct drm_gpuva_ops *ops = NULL; struct drm_gpuva_op *__op; - bool is_cpu_addr_mirror = false; + unsigned int vma_flags = 0; bool remap_op = false; struct xe_vma_mem_attr tmp_attr; u16 default_pat; @@ -4242,15 +4222,17 @@ static int xe_vm_alloc_vma(struct xe_vm *vm, vma = gpuva_to_vma(op->base.unmap.va); XE_WARN_ON(!xe_vma_has_default_mem_attrs(vma)); default_pat = vma->attr.default_pat_index; + vma_flags = vma->gpuva.flags; } if (__op->op == DRM_GPUVA_OP_REMAP) { vma = gpuva_to_vma(op->base.remap.unmap->va); default_pat = vma->attr.default_pat_index; + vma_flags = vma->gpuva.flags; } if (__op->op == DRM_GPUVA_OP_MAP) { - op->map.is_cpu_addr_mirror = true; + op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK; op->map.pat_index = default_pat; } } else { @@ -4259,11 +4241,7 @@ static int xe_vm_alloc_vma(struct xe_vm *vm, xe_assert(vm->xe, !remap_op); xe_assert(vm->xe, xe_vma_has_no_bo(vma)); remap_op = true; - - if (xe_vma_is_cpu_addr_mirror(vma)) - is_cpu_addr_mirror = true; - else - is_cpu_addr_mirror = false; + vma_flags = vma->gpuva.flags; } if (__op->op == DRM_GPUVA_OP_MAP) { @@ -4272,10 +4250,10 @@ static int xe_vm_alloc_vma(struct xe_vm *vm, /* * In case of madvise ops DRM_GPUVA_OP_MAP is * always after DRM_GPUVA_OP_REMAP, so ensure - * we assign op->map.is_cpu_addr_mirror true - * if REMAP is for xe_vma_is_cpu_addr_mirror vma + * we propagate the flags from the vma we're + * unmapping.
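The xe rework above replaces four independent booleans in struct xe_vma_op_map with one vma_flags word filtered through XE_VMA_CREATE_MASK, so a remap inherits every creation-time attribute in a single masked OR. A reduced model of that design is sketched below; the demo_* names are hypothetical and only mirror, not reproduce, the xe definitions.

```c
#include <linux/bits.h>

/* Hypothetical stand-ins for the per-VMA flag bits consolidated above. */
#define DEMO_VMA_READ_ONLY		BIT(0)
#define DEMO_VMA_DUMPABLE		BIT(1)
#define DEMO_VMA_SPARSE			BIT(2)
#define DEMO_VMA_SYSTEM_ALLOCATOR	BIT(3)
#define DEMO_VMA_MADV_AUTORESET		BIT(4)

/* Only creation-time attributes may be copied onto a new mapping. */
#define DEMO_VMA_CREATE_MASK ( \
	DEMO_VMA_READ_ONLY | \
	DEMO_VMA_DUMPABLE | \
	DEMO_VMA_SPARSE | \
	DEMO_VMA_SYSTEM_ALLOCATOR | \
	DEMO_VMA_MADV_AUTORESET)

struct demo_vma { unsigned int flags; };
struct demo_map_op { unsigned int vma_flags; };

/*
 * With a single flags word, a remap created while unmapping an
 * existing VMA inherits all relevant attributes in one masked OR,
 * instead of copying four separate booleans (and risking a miss
 * whenever the next flag, such as MADV_AUTORESET, is added).
 */
static void demo_propagate_flags(struct demo_map_op *op,
				 const struct demo_vma *old)
{
	op->vma_flags |= old->flags & DEMO_VMA_CREATE_MASK;
}
```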
*/ - op->map.is_cpu_addr_mirror = is_cpu_addr_mirror; + op->map.vma_flags |= vma_flags & XE_VMA_CREATE_MASK; } } print_op(vm->xe, __op); diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index 413353e1c225..d6e2a0fdd4b3 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -46,6 +46,7 @@ struct xe_vm_pgtable_update_op; #define XE_VMA_PTE_COMPACT (DRM_GPUVA_USERBITS << 7) #define XE_VMA_DUMPABLE (DRM_GPUVA_USERBITS << 8) #define XE_VMA_SYSTEM_ALLOCATOR (DRM_GPUVA_USERBITS << 9) +#define XE_VMA_MADV_AUTORESET (DRM_GPUVA_USERBITS << 10) /** * struct xe_vma_mem_attr - memory attributes associated with vma @@ -345,17 +346,11 @@ struct xe_vm { struct xe_vma_op_map { /** @vma: VMA to map */ struct xe_vma *vma; + /** @vma_flags: flags for this VMA, built from XE_VMA_* and DRM_GPUVA_* bits */ + unsigned int vma_flags; /** @immediate: Immediate bind */ bool immediate; - /** @read_only: Read only */ - bool read_only; - /** @is_null: is NULL binding */ - bool is_null; - /** @is_cpu_addr_mirror: is CPU address mirror binding */ - bool is_cpu_addr_mirror; - /** @dumpable: whether BO is dumped on GPU hang */ - bool dumpable; - /** @invalidate: invalidate the VMA before bind */ bool invalidate_on_bind; /** @pat_index: The pat index to use for this operation. */ u16 pat_index; diff --git a/drivers/hwmon/cgbc-hwmon.c b/drivers/hwmon/cgbc-hwmon.c index 772f44d56ccf..3aff4e092132 100644 --- a/drivers/hwmon/cgbc-hwmon.c +++ b/drivers/hwmon/cgbc-hwmon.c @@ -107,6 +107,9 @@ static int cgbc_hwmon_probe_sensors(struct device *dev, struct cgbc_hwmon_data * nb_sensors = data[0]; hwmon->sensors = devm_kzalloc(dev, sizeof(*hwmon->sensors) * nb_sensors, GFP_KERNEL); + if (!hwmon->sensors) + return -ENOMEM; + sensor = hwmon->sensors; for (i = 0; i < nb_sensors; i++) { diff --git a/drivers/hwmon/gpd-fan.c b/drivers/hwmon/gpd-fan.c index 644dc3ca9df7..321794807e8d 100644 --- a/drivers/hwmon/gpd-fan.c +++ b/drivers/hwmon/gpd-fan.c @@ -615,14 +615,14 @@ static int gpd_fan_probe(struct platform_device *pdev) const struct device *hwdev; res = platform_get_resource(pdev, IORESOURCE_IO, 0); - if (IS_ERR(res)) - return dev_err_probe(dev, PTR_ERR(res), + if (!res) + return dev_err_probe(dev, -EINVAL, "Failed to get platform resource\n"); region = devm_request_region(dev, res->start, resource_size(res), DRIVER_NAME); - if (IS_ERR(region)) - return dev_err_probe(dev, PTR_ERR(region), + if (!region) + return dev_err_probe(dev, -EBUSY, "Failed to request region\n"); hwdev = devm_hwmon_device_register_with_info(dev, @@ -631,7 +631,7 @@ static int gpd_fan_probe(struct platform_device *pdev) &gpd_fan_chip_info, NULL); if (IS_ERR(hwdev)) - return dev_err_probe(dev, PTR_ERR(region), + return dev_err_probe(dev, PTR_ERR(hwdev), "Failed to register hwmon device\n"); return 0; diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c index 52cf62e45a86..6bba9b50c51b 100644 --- a/drivers/hwmon/pmbus/isl68137.c +++ b/drivers/hwmon/pmbus/isl68137.c @@ -336,10 +336,9 @@ static int isl68137_probe_from_dt(struct device *dev, struct isl68137_data *data) { const struct device_node *np = dev->of_node; - struct device_node *child; int err; - for_each_child_of_node(np, child) { + for_each_child_of_node_scoped(np, child) { if (strcmp(child->name, "channel")) continue; diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c index 56834d26f8ef..ef981ed97da8 100644 --- a/drivers/hwmon/pmbus/max34440.c +++ b/drivers/hwmon/pmbus/max34440.c @@ -336,18 +336,18 @@ static struct pmbus_driver_info max34440_info[] = { .format[PSC_CURRENT_IN] =
direct, .format[PSC_CURRENT_OUT] = direct, .format[PSC_TEMPERATURE] = direct, - .m[PSC_VOLTAGE_IN] = 1, + .m[PSC_VOLTAGE_IN] = 125, .b[PSC_VOLTAGE_IN] = 0, .R[PSC_VOLTAGE_IN] = 0, - .m[PSC_VOLTAGE_OUT] = 1, + .m[PSC_VOLTAGE_OUT] = 125, .b[PSC_VOLTAGE_OUT] = 0, .R[PSC_VOLTAGE_OUT] = 0, - .m[PSC_CURRENT_IN] = 1, + .m[PSC_CURRENT_IN] = 250, .b[PSC_CURRENT_IN] = 0, - .R[PSC_CURRENT_IN] = 2, - .m[PSC_CURRENT_OUT] = 1, + .R[PSC_CURRENT_IN] = -1, + .m[PSC_CURRENT_OUT] = 250, .b[PSC_CURRENT_OUT] = 0, - .R[PSC_CURRENT_OUT] = 2, + .R[PSC_CURRENT_OUT] = -1, .m[PSC_TEMPERATURE] = 1, .b[PSC_TEMPERATURE] = 0, .R[PSC_TEMPERATURE] = 2, diff --git a/drivers/hwmon/sht3x.c b/drivers/hwmon/sht3x.c index 557ad3e7752a..f36c0229328f 100644 --- a/drivers/hwmon/sht3x.c +++ b/drivers/hwmon/sht3x.c @@ -291,24 +291,26 @@ static struct sht3x_data *sht3x_update_client(struct device *dev) return data; } -static int temp1_input_read(struct device *dev) +static int temp1_input_read(struct device *dev, long *temp) { struct sht3x_data *data = sht3x_update_client(dev); if (IS_ERR(data)) return PTR_ERR(data); - return data->temperature; + *temp = data->temperature; + return 0; } -static int humidity1_input_read(struct device *dev) +static int humidity1_input_read(struct device *dev, long *humidity) { struct sht3x_data *data = sht3x_update_client(dev); if (IS_ERR(data)) return PTR_ERR(data); - return data->humidity; + *humidity = data->humidity; + return 0; } /* @@ -706,6 +708,7 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { enum sht3x_limits index; + int ret; switch (type) { case hwmon_chip: @@ -720,10 +723,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type, case hwmon_temp: switch (attr) { case hwmon_temp_input: - *val = temp1_input_read(dev); - break; + return temp1_input_read(dev, val); case hwmon_temp_alarm: - *val = temp1_alarm_read(dev); + ret = temp1_alarm_read(dev); + if (ret < 0) + return ret; + *val = ret; break; case hwmon_temp_max: index = limit_max; @@ -748,10 +753,12 @@ static int sht3x_read(struct device *dev, enum hwmon_sensor_types type, case hwmon_humidity: switch (attr) { case hwmon_humidity_input: - *val = humidity1_input_read(dev); - break; + return humidity1_input_read(dev, val); case hwmon_humidity_alarm: - *val = humidity1_alarm_read(dev); + ret = humidity1_alarm_read(dev); + if (ret < 0) + return ret; + *val = ret; break; case hwmon_humidity_max: index = limit_max; diff --git a/drivers/misc/amd-sbi/Kconfig b/drivers/misc/amd-sbi/Kconfig index 4aae0733d0fc..ab594908cb4a 100644 --- a/drivers/misc/amd-sbi/Kconfig +++ b/drivers/misc/amd-sbi/Kconfig @@ -2,9 +2,11 @@ config AMD_SBRMI_I2C tristate "AMD side band RMI support" depends on I2C + depends on ARM || ARM64 || COMPILE_TEST select REGMAP_I2C help Side band RMI over I2C support for AMD out of band management. + This driver is intended to run on the BMC, not the managed node. This driver can also be built as a module. If so, the module will be called sbrmi-i2c. 
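The sht3x refactor above illustrates a convention worth calling out: a helper that returns the measurement directly cannot report failures once legal readings overlap the errno range, since a reading of -5000 milli-degrees and an -EIO are both negative. Moving the value to an out-parameter leaves the return code free to carry only success or an error. A minimal sketch of the idiom, with hypothetical names rather than code from the driver:

#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical stand-ins for the driver's cached state; the real sht3x
 * code refreshes these from the chip via sht3x_update_client().
 */
static long cached_temp_mdeg = -5000;	/* -5 degC: a legal negative reading */
static int bus_ok = 1;

/* Status travels in the return code, the measurement in *temp. */
static int demo_temp_read(long *temp)
{
	if (!bus_ok)
		return -EIO;

	*temp = cached_temp_mdeg;
	return 0;
}

int main(void)
{
	long t;
	int ret = demo_temp_read(&t);

	if (ret)
		return 1;	/* error path, t was never written */

	printf("%ld\n", t);	/* -5000, unambiguous because ret == 0 */
	return 0;
}

The hwmon core's ->read() callback follows the same convention (an int status plus a value written through *val), which is why sht3x_read() can now simply forward the helper's return value.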
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 621bce7e101c..ee652ef01534 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -381,6 +381,8 @@ static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd, } spin_unlock(&fl->lock); + dma_buf_put(buf); + return ret; } diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index bc40b940ae21..a4f75dc36929 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -120,6 +120,8 @@ #define MEI_DEV_ID_PTL_H 0xE370 /* Panther Lake H */ #define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */ +#define MEI_DEV_ID_WCL_P 0x4D70 /* Wildcat Lake P */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/mei_lb.c b/drivers/misc/mei/mei_lb.c index 77686b108d3c..78717ee8ac9a 100644 --- a/drivers/misc/mei/mei_lb.c +++ b/drivers/misc/mei/mei_lb.c @@ -134,8 +134,7 @@ static bool mei_lb_check_response(const struct device *dev, ssize_t bytes, return true; } -static int mei_lb_push_payload(struct device *dev, - enum intel_lb_type type, u32 flags, +static int mei_lb_push_payload(struct device *dev, u32 type, u32 flags, const void *payload, size_t payload_size) { struct mei_cl_device *cldev; diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index b108a7c22388..b017ff29dbd1 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -127,6 +127,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_H, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_WCL_P, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index c9eb5c5393e4..06b55a891c6b 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c @@ -109,19 +109,19 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto end; } + err = mei_register(dev, &pdev->dev); + if (err) + goto release_irq; + if (mei_start(dev)) { dev_err(&pdev->dev, "init hw failure.\n"); err = -ENODEV; - goto release_irq; + goto deregister; } pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_TXI_RPM_TIMEOUT); pm_runtime_use_autosuspend(&pdev->dev); - err = mei_register(dev, &pdev->dev); - if (err) - goto stop; - pci_set_drvdata(pdev, dev); /* @@ -144,8 +144,8 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -stop: - mei_stop(dev); +deregister: + mei_deregister(dev); release_irq: mei_cancel_work(dev); mei_disable_interrupts(dev); diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c index cf5be9c449a5..10064d7b7249 100644 --- a/drivers/most/most_usb.c +++ b/drivers/most/most_usb.c @@ -929,6 +929,10 @@ static void release_mdev(struct device *dev) { struct most_dev *mdev = to_mdev_from_dev(dev); + kfree(mdev->busy_urbs); + kfree(mdev->cap); + kfree(mdev->conf); + kfree(mdev->ep_address); kfree(mdev); } /** @@ -1093,7 +1097,7 @@ hdm_probe(struct usb_interface *interface, const struct usb_device_id *id) err_free_conf: kfree(mdev->conf); err_free_mdev: - put_device(&mdev->dev); + kfree(mdev); return ret; } @@ -1121,13 +1125,6 @@ static void hdm_disconnect(struct usb_interface *interface) if (mdev->dci) device_unregister(&mdev->dci->dev); most_deregister_interface(&mdev->iface); - - kfree(mdev->busy_urbs); - kfree(mdev->cap); - kfree(mdev->conf); - kfree(mdev->ep_address); - put_device(&mdev->dci->dev); - put_device(&mdev->dev); } static int hdm_suspend(struct usb_interface 
*interface, pm_message_t message) diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c index ecd9a0bd5e18..49b57bb5fac1 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c @@ -290,9 +290,15 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev, return -EINVAL; } + if (unlikely(!try_module_get(THIS_MODULE))) { + NL_SET_ERR_MSG_MOD(extack, "Failed to acquire module reference"); + return -ENODEV; + } + sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL); if (!sa_entry) { res = -ENOMEM; + module_put(THIS_MODULE); goto out; } @@ -301,7 +307,6 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev, sa_entry->esn = 1; ch_ipsec_setkey(x, sa_entry); x->xso.offload_handle = (unsigned long)sa_entry; - try_module_get(THIS_MODULE); out: return res; } diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h index ea09a09c451b..2097e4c2b3d7 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_common.h @@ -17,6 +17,7 @@ #define HBG_PCU_CACHE_LINE_SIZE 32 #define HBG_TX_TIMEOUT_BUF_LEN 1024 #define HBG_RX_DESCR 0x01 +#define HBG_NO_PHY 0xFF #define HBG_PACKET_HEAD_SIZE ((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \ HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE) diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c index 83cf75bf7a17..e11495b7ee98 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_err.c @@ -136,12 +136,11 @@ static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev, { struct net_device *netdev = pci_get_drvdata(pdev); - netif_device_detach(netdev); - - if (state == pci_channel_io_perm_failure) + if (state == pci_channel_io_perm_failure) { + netif_device_detach(netdev); return PCI_ERS_RESULT_DISCONNECT; + } - pci_disable_device(pdev); return PCI_ERS_RESULT_NEED_RESET; } @@ -150,6 +149,9 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev) struct net_device *netdev = pci_get_drvdata(pdev); struct hbg_priv *priv = netdev_priv(netdev); + netif_device_detach(netdev); + pci_disable_device(pdev); + if (pci_enable_device(pdev)) { dev_err(&pdev->dev, "failed to re-enable PCI device after reset\n"); diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c index d0aa0661ecd4..d6e8ce8e351a 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_hw.c @@ -244,6 +244,9 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex) hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE); + if (priv->mac.phy_addr == HBG_NO_PHY) + return; + /* wait MAC link up */ ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR, link_status, diff --git a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c index 8af0bc4cca21..ae4cb35186d8 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_irq.c @@ -32,6 +32,7 @@ static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv, const struct hbg_irq_info *irq_info) { priv->stats.rx_fifo_less_empty_thrsld_cnt++; + hbg_hw_irq_enable(priv, irq_info->mask, true); } #define HBG_IRQ_I(name, handle) \ diff --git 
a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c index 37791de47f6f..b6f0a2780ea8 100644 --- a/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c +++ b/drivers/net/ethernet/hisilicon/hibmcge/hbg_mdio.c @@ -20,7 +20,6 @@ #define HBG_MDIO_OP_INTERVAL_US (5 * 1000) #define HBG_NP_LINK_FAIL_RETRY_TIMES 5 -#define HBG_NO_PHY 0xFF static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 81d3bdc098e6..cf8abbe01840 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9429,8 +9429,7 @@ static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd) /* this command reads phy id and register at the same time */ fallthrough; case SIOCGMIIREG: - data->val_out = hclge_read_phy_reg(hdev, data->reg_num); - return 0; + return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out); case SIOCSMIIREG: return hclge_write_phy_reg(hdev, data->reg_num, data->val_in); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 96553109f44c..cf881108fa57 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -274,7 +274,7 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev) phy_stop(phydev); } -u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr) +int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val) { struct hclge_phy_reg_cmd *req; struct hclge_desc desc; @@ -286,11 +286,14 @@ u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr) req->reg_addr = cpu_to_le16(reg_addr); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) + if (ret) { dev_err(&hdev->pdev->dev, "failed to read phy reg, ret = %d.\n", ret); + return ret; + } - return le16_to_cpu(req->reg_val); + *val = le16_to_cpu(req->reg_val); + return 0; } int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h index 4200d0b6d931..21d434c82475 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h @@ -13,7 +13,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle); void hclge_mac_disconnect_phy(struct hnae3_handle *handle); void hclge_mac_start_phy(struct hclge_dev *hdev); void hclge_mac_stop_phy(struct hclge_dev *hdev); -u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr); +int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val); int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 83f5217bce9f..046bc9c65c51 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -4386,6 +4386,15 @@ int ice_get_phy_lane_number(struct ice_hw *hw) unsigned int lane; int err; + /* E82X does not have sequential IDs; the lane number is the PF ID. + * Among E825 devices, the exception is the variant with an external + * PHY (0x579F), which also has a 1:1 pf_id -> lane_number + * mapping.
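+ * + * In both cases the port option lookup below can be skipped and the + * PF ID returned directly.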
+ */ + if (hw->mac_type == ICE_MAC_GENERIC || + hw->device_id == ICE_DEV_ID_E825C_SGMII) + return hw->pf_id; + options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL); if (!options) return -ENOMEM; @@ -6500,6 +6509,28 @@ u32 ice_get_link_speed(u16 index) return ice_aq_to_link_speed[index]; } +/** + * ice_get_dest_cgu - get destination CGU dev for given HW + * @hw: pointer to the HW struct + * + * Get CGU client id for CGU register read/write operations. + * + * Return: CGU device id to use in SBQ transactions. + */ +static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw) +{ + /* On a dual complex E825, only complex 0 has a functional CGU, which + * powers all the PHYs. The SBQ destination device cgu points to the + * CGU of the current complex; to access the primary CGU from the + * secondary complex, the driver should use cgu_peer as the destination + * device. + */ + if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) && + !ice_is_primary(hw)) + return ice_sbq_dev_cgu_peer; + return ice_sbq_dev_cgu; +} + /** * ice_read_cgu_reg - Read a CGU register * @hw: Pointer to the HW struct @@ -6514,8 +6545,8 @@ u32 ice_get_link_speed(u16 index) int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val) { struct ice_sbq_msg_input cgu_msg = { + .dest_dev = ice_get_dest_cgu(hw), .opcode = ice_sbq_msg_rd, - .dest_dev = ice_sbq_dev_cgu, .msg_addr_low = addr }; int err; @@ -6546,8 +6577,8 @@ int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val) int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val) { struct ice_sbq_msg_input cgu_msg = { + .dest_dev = ice_get_dest_cgu(hw), .opcode = ice_sbq_msg_wr, - .dest_dev = ice_sbq_dev_cgu, .msg_addr_low = addr, .data = val }; diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c index 06b8786ae3ab..c8cb492fddf4 100644 --- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c +++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c @@ -1475,7 +1475,7 @@ static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk) per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs; hw->blk[blk].masks.count = per_pf; - hw->blk[blk].masks.first = hw->pf_id * per_pf; + hw->blk[blk].masks.first = hw->logical_pf_id * per_pf; memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks)); diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h index 183dd5457d6a..21bb861febbf 100644 --- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h @@ -50,6 +50,7 @@ enum ice_sbq_dev_id { ice_sbq_dev_phy_0 = 0x02, ice_sbq_dev_cgu = 0x06, ice_sbq_dev_phy_0_peer = 0x0D, + ice_sbq_dev_cgu_peer = 0x0F, }; enum ice_sbq_msg_opcode { diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index f8a208c84f15..10e2445e0ded 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2281,7 +2281,7 @@ static int igb_get_sset_count(struct net_device *netdev, int sset) case ETH_SS_PRIV_FLAGS: return IGB_PRIV_FLAGS_STR_LEN; default: - return -ENOTSUPP; + return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index f3e7218ba6f3..bb783042d1af 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -810,7 +810,7 @@ static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset) case ETH_SS_PRIV_FLAGS:
return IGC_PRIV_FLAGS_STR_LEN; default: - return -ENOTSUPP; + return -EOPNOTSUPP; } } @@ -2094,6 +2094,9 @@ static void igc_ethtool_diag_test(struct net_device *netdev, netdev_info(adapter->netdev, "Offline testing starting"); set_bit(__IGC_TESTING, &adapter->state); + /* power up PHY for link test */ + igc_power_up_phy_copper(&adapter->hw); + /* Link test performed before hardware reset so autoneg doesn't * interfere with test result */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ca1ccc630001..3190ce7e44c7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -11507,10 +11507,10 @@ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter) shutdown_aci: mutex_destroy(&adapter->hw.aci.lock); ixgbe_release_hw_control(adapter); - devlink_free(adapter->devlink); clean_up_probe: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); + devlink_free(adapter->devlink); pci_release_mem_regions(pdev); if (disable_dev) pci_disable_device(pdev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 114dd88fc71c..6885d2343c48 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -641,7 +641,7 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp, * disabled */ if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp) - return -ENOTSUPP; + return -EOPNOTSUPP; if (on) adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index d7a11ff9bbdb..da2d1eb52c13 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -320,7 +320,6 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq, err_free: kfree(buf); err_out: - priv_rx->rq_stats->tls_resync_req_skip++; return err; } @@ -339,14 +338,19 @@ static void resync_handle_work(struct work_struct *work) if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { mlx5e_ktls_priv_rx_put(priv_rx); + priv_rx->rq_stats->tls_resync_req_skip++; + tls_offload_rx_resync_async_request_cancel(&resync->core); return; } c = resync->priv->channels.c[priv_rx->rxq]; sq = &c->async_icosq; - if (resync_post_get_progress_params(sq, priv_rx)) + if (resync_post_get_progress_params(sq, priv_rx)) { + priv_rx->rq_stats->tls_resync_req_skip++; + tls_offload_rx_resync_async_request_cancel(&resync->core); mlx5e_ktls_priv_rx_put(priv_rx); + } } static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync, @@ -425,14 +429,21 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, { struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf; struct mlx5e_ktls_offload_context_rx *priv_rx; + struct tls_offload_resync_async *async_resync; + struct tls_offload_context_rx *rx_ctx; u8 tracker_state, auth_state, *ctx; struct device *dev; u32 hw_seq; priv_rx = buf->priv_rx; dev = mlx5_core_dma_dev(sq->channel->mdev); - if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) + rx_ctx = tls_offload_ctx_rx(tls_get_ctx(priv_rx->sk)); + async_resync = rx_ctx->resync_async; + if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) { + priv_rx->rq_stats->tls_resync_req_skip++; + tls_offload_rx_resync_async_request_cancel(async_resync); goto out; 
+ } dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); @@ -443,11 +454,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING || auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) { priv_rx->rq_stats->tls_resync_req_skip++; + tls_offload_rx_resync_async_request_cancel(async_resync); goto out; } hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn); - tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq)); + tls_offload_rx_resync_async_request_end(async_resync, + cpu_to_be32(hw_seq)); priv_rx->rq_stats->tls_resync_req_end++; out: mlx5e_ktls_priv_rx_put(priv_rx); @@ -472,8 +485,10 @@ static bool resync_queue_get_psv(struct sock *sk) resync = &priv_rx->resync; mlx5e_ktls_priv_rx_get(priv_rx); - if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) + if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) { mlx5e_ktls_priv_rx_put(priv_rx); + return false; + } return true; } @@ -482,6 +497,7 @@ static bool resync_queue_get_psv(struct sock *sk) static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb) { struct ethhdr *eth = (struct ethhdr *)(skb->data); + struct tls_offload_resync_async *resync_async; struct net_device *netdev = rq->netdev; struct net *net = dev_net(netdev); struct sock *sk = NULL; @@ -527,7 +543,8 @@ static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb) seq = th->seq; datalen = skb->len - depth; - tls_offload_rx_resync_async_request_start(sk, seq, datalen); + resync_async = tls_offload_ctx_rx(tls_get_ctx(sk))->resync_async; + tls_offload_rx_resync_async_request_start(resync_async, seq, datalen); rq->stats->tls_resync_req_start++; unref: @@ -556,6 +573,18 @@ void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk, resync_handle_seq_match(priv_rx, c); } +void +mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi) +{ + struct mlx5e_ktls_offload_context_rx *priv_rx; + struct mlx5e_ktls_rx_resync_buf *buf; + + buf = wi->tls_get_params.buf; + priv_rx = buf->priv_rx; + priv_rx->rq_stats->tls_resync_req_skip++; + tls_offload_rx_resync_async_request_cancel(&priv_rx->resync.core); +} + /* End of resync section */ void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h index f87b65c560ea..cb08799769ee 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h @@ -29,6 +29,10 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi, u32 *dma_fifo_cc); + +void +mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi); + static inline bool mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 1c79adc51a04..26621a2972ec 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1036,6 +1036,10 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq) netdev_WARN_ONCE(cq->netdev, "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe)); +#ifdef CONFIG_MLX5_EN_TLS + 
if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS) + mlx5e_ktls_rx_resync_async_request_cancel(wi); +#endif mlx5e_dump_error_cqe(&sq->cq, sq->sqn, (struct mlx5_err_cqe *)cqe); mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c index 76382626ad41..929adeb50a98 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -66,7 +66,6 @@ static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw) esw->fdb_table.legacy.addr_grp = NULL; esw->fdb_table.legacy.allmulti_grp = NULL; esw->fdb_table.legacy.promisc_grp = NULL; - atomic64_set(&esw->user_count, 0); } static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 9735a75732cf..4092ea29c630 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1978,7 +1978,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) /* Holds true only as long as DMFS is the default */ mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns, MLX5_FLOW_STEERING_MODE_DMFS); - atomic64_set(&esw->user_count, 0); } static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 132626a3f9f7..9ef72f294117 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2557,14 +2557,16 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info, err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, &nn->tlv_caps); if (err) - goto err_free_nn; + goto err_free_xsk_pools; err = nfp_ccm_mbox_alloc(nn); if (err) - goto err_free_nn; + goto err_free_xsk_pools; return nn; +err_free_xsk_pools: + kfree(nn->dp.xsk_pools); err_free_nn: if (nn->dp.netdev) free_netdev(nn->dp.netdev); diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c index 6fd0c1e9a7d5..7cfd9000f79d 100644 --- a/drivers/net/ethernet/sfc/mae.c +++ b/drivers/net/ethernet/sfc/mae.c @@ -1090,6 +1090,9 @@ void efx_mae_remove_mport(void *desc, void *arg) kfree(mport); } +/* + * Takes ownership of @desc, even if it returns an error + */ static int efx_mae_process_mport(struct efx_nic *efx, struct mae_mport_desc *desc) { @@ -1100,6 +1103,7 @@ static int efx_mae_process_mport(struct efx_nic *efx, if (!IS_ERR_OR_NULL(mport)) { netif_err(efx, drv, efx->net_dev, "mport with id %u does exist!!!\n", desc->mport_id); + kfree(desc); return -EEXIST; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index ba4eeba14baa..1e69c1a7dc6c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4127,18 +4127,11 @@ static int stmmac_release(struct net_device *dev) static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, struct stmmac_tx_queue *tx_q) { - u16 tag = 0x0, inner_tag = 0x0; - u32 inner_type = 0x0; struct dma_desc *p; + u16 tag = 0x0; - if (!priv->dma_cap.vlins) + if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb)) return false; - if (!skb_vlan_tag_present(skb)) - return false; - if 
(skb->vlan_proto == htons(ETH_P_8021AD)) { - inner_tag = skb_vlan_tag_get(skb); - inner_type = STMMAC_VLAN_INSERT; - } tag = skb_vlan_tag_get(skb); @@ -4147,7 +4140,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb, else p = &tx_q->dma_tx[tx_q->cur_tx]; - if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type)) + if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0)) return false; stmmac_set_tx_owner(priv, p); @@ -4545,6 +4538,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) bool has_vlan, set_ic; int entry, first_tx; dma_addr_t des; + u32 sdu_len; tx_q = &priv->dma_conf.tx_queue[queue]; txq_stats = &priv->xstats.txq_stats[queue]; @@ -4563,10 +4557,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) } if (priv->est && priv->est->enable && - priv->est->max_sdu[queue] && - skb->len > priv->est->max_sdu[queue]){ - priv->xstats.max_sdu_txq_drop[queue]++; - goto max_sdu_err; + priv->est->max_sdu[queue]) { + sdu_len = skb->len; + /* Add VLAN tag length if VLAN tag insertion offload is requested */ + if (priv->dma_cap.vlins && skb_vlan_tag_present(skb)) + sdu_len += VLAN_HLEN; + if (sdu_len > priv->est->max_sdu[queue]) { + priv->xstats.max_sdu_txq_drop[queue]++; + goto max_sdu_err; + } } if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { @@ -7700,11 +7699,8 @@ int stmmac_dvr_probe(struct device *device, ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; } - if (priv->dma_cap.vlins) { + if (priv->dma_cap.vlins) ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; - if (priv->dma_cap.dvlan) - ndev->features |= NETIF_F_HW_VLAN_STAG_TX; - } #endif priv->msg_enable = netif_msg_init(debug, default_msg_level); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index ef65cf511f3e..d78652718599 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -981,7 +981,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv, if (qopt->cmd == TAPRIO_CMD_DESTROY) goto disable; - if (qopt->num_entries >= dep) + if (qopt->num_entries > dep) return -EINVAL; if (!qopt->cycle_time) return -ERANGE; @@ -1012,7 +1012,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv, s64 delta_ns = qopt->entries[i].interval; u32 gates = qopt->entries[i].gate_mask; - if (delta_ns > GENMASK(wid, 0)) + if (delta_ns > GENMASK(wid - 1, 0)) return -ERANGE; if (gates > GENMASK(31 - wid, 0)) return -ERANGE; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c index 0b6f6228ae35..ff02a79c00d4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c @@ -212,7 +212,7 @@ static void vlan_enable(struct mac_device_info *hw, u32 type) value = readl(ioaddr + VLAN_INCL); value |= VLAN_VLTI; - value |= VLAN_CSVL; /* Only use SVLAN */ + value &= ~VLAN_CSVL; /* Only use CVLAN */ value &= ~VLAN_VLC; value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC; writel(value, ioaddr + VLAN_INCL); diff --git a/drivers/net/mctp/mctp-usb.c b/drivers/net/mctp/mctp-usb.c index 36ccc53b1797..ef860cfc629f 100644 --- a/drivers/net/mctp/mctp-usb.c +++ b/drivers/net/mctp/mctp-usb.c @@ -96,11 +96,13 @@ static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb, skb->data, skb->len, mctp_usb_out_complete, skb); + /* Stops TX queue first to prevent race condition with URB complete */ + 
netif_stop_queue(dev); rc = usb_submit_urb(urb, GFP_ATOMIC); - if (rc) + if (rc) { + netif_wake_queue(dev); goto err_drop; - else - netif_stop_queue(dev); + } return NETDEV_TX_OK; diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index 194570443493..5d8d0214786c 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -886,8 +886,11 @@ static ssize_t userdatum_value_show(struct config_item *item, char *buf) static void update_userdata(struct netconsole_target *nt) { - int complete_idx = 0, child_count = 0; struct list_head *entry; + int child_count = 0; + unsigned long flags; + + spin_lock_irqsave(&target_list_lock, flags); /* Clear the current string in case the last userdatum was deleted */ nt->userdata_length = 0; @@ -897,8 +900,11 @@ static void update_userdata(struct netconsole_target *nt) struct userdatum *udm_item; struct config_item *item; - if (WARN_ON_ONCE(child_count >= MAX_EXTRADATA_ITEMS)) - break; + if (child_count >= MAX_EXTRADATA_ITEMS) { + spin_unlock_irqrestore(&target_list_lock, flags); + WARN_ON_ONCE(1); + return; + } child_count++; item = container_of(entry, struct config_item, ci_entry); @@ -912,12 +918,11 @@ static void update_userdata(struct netconsole_target *nt) * one entry length (1/MAX_EXTRADATA_ITEMS long), entry count is * checked to not exceed MAX items with child_count above */ - complete_idx += scnprintf(&nt->extradata_complete[complete_idx], - MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n", - item->ci_name, udm_item->value); + nt->userdata_length += scnprintf(&nt->extradata_complete[nt->userdata_length], + MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n", + item->ci_name, udm_item->value); } - nt->userdata_length = strnlen(nt->extradata_complete, - sizeof(nt->extradata_complete)); + spin_unlock_irqrestore(&target_list_lock, flags); } static ssize_t userdatum_value_store(struct config_item *item, const char *buf, diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index deeefb962566..36a0c1b7f59c 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -738,6 +738,12 @@ static int dp83867_config_init(struct phy_device *phydev) return ret; } + /* Although the DP83867 reports EEE capability through the + * MDIO_PCS_EEE_ABLE and MDIO_AN_EEE_ADV registers, the feature + * is not actually implemented in hardware. 
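+ * + * Disabling EEE here prevents the unusable capability from being + * advertised to, or negotiated with, the link partner.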
+ */ + phy_disable_eee(phydev); + if (phy_interface_is_rgmii(phydev) || phydev->interface == PHY_INTERFACE_MODE_SGMII) { val = phy_read(phydev, MII_DP83867_PHYCTRL); diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c index a2cd1cc35cde..1f381d7b13ff 100644 --- a/drivers/net/phy/dp83869.c +++ b/drivers/net/phy/dp83869.c @@ -84,7 +84,7 @@ #define DP83869_CLK_DELAY_DEF 7 /* STRAP_STS1 bits */ -#define DP83869_STRAP_OP_MODE_MASK GENMASK(2, 0) +#define DP83869_STRAP_OP_MODE_MASK GENMASK(11, 9) #define DP83869_STRAP_STS1_RESERVED BIT(11) #define DP83869_STRAP_MIRROR_ENABLED BIT(12) @@ -528,7 +528,7 @@ static int dp83869_set_strapped_mode(struct phy_device *phydev) if (val < 0) return val; - dp83869->mode = val & DP83869_STRAP_OP_MODE_MASK; + dp83869->mode = FIELD_GET(DP83869_STRAP_OP_MODE_MASK, val); return 0; } diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index 85bd5d845409..232bbd79a4de 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -230,7 +230,9 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf) int i; unsigned long gpio_bits = dev->driver_info->data; - usbnet_get_endpoints(dev,intf); + ret = usbnet_get_endpoints(dev, intf); + if (ret) + goto out; /* Toggle the GPIOs in a manufacturer/model specific way */ for (i = 2; i >= 0; i--) { @@ -848,7 +850,9 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) dev->driver_priv = priv; - usbnet_get_endpoints(dev, intf); + ret = usbnet_get_endpoints(dev, intf); + if (ret) + return ret; /* Maybe the boot loader passed the MAC address via device tree */ if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) { @@ -1281,7 +1285,9 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf) int ret; u8 buf[ETH_ALEN] = {0}; - usbnet_get_endpoints(dev,intf); + ret = usbnet_get_endpoints(dev, intf); + if (ret) + return ret; /* Get the MAC address */ ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0); diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index 62a85dbad31a..f3087fb62f4f 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1658,6 +1658,8 @@ void usbnet_disconnect(struct usb_interface *intf) net = dev->net; unregister_netdev (net); + cancel_work_sync(&dev->kevent); + while ((urb = usb_get_from_anchor(&dev->deferred))) { dev_kfree_skb(urb->context); kfree(urb->sg); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index a757cbcab87f..8e8a179aaa49 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1379,9 +1379,14 @@ static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct ret = XDP_PASS; rcu_read_lock(); prog = rcu_dereference(rq->xdp_prog); - /* TODO: support multi buffer. */ - if (prog && num_buf == 1) - ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats); + if (prog) { + /* TODO: support multi buffer. 
*/ + if (num_buf == 1) + ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, + stats); + else + ret = XDP_ABORTED; + } rcu_read_unlock(); switch (ret) { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index e595b0979a56..b3b00d324075 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -1937,6 +1937,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id) if (cmd_id == WMI_CMD_UNSUPPORTED) { ath10k_warn(ar, "wmi command %d is not supported by firmware\n", cmd_id); + dev_kfree_skb_any(skb); return ret; } diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index 2810752260f2..812686173ac8 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -912,42 +912,84 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { static const struct dmi_system_id ath11k_pm_quirk_table[] = { { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* X13 G4 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21J3"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* X13 G4 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21J4"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* T14 G4 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K3"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* T14 G4 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21K4"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* P14s G4 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K5"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* P14s G4 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21K6"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* T16 G2 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K7"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* T16 G2 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21K8"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* P16s G2 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21K9"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* P16s G2 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21KA"), }, }, { .driver_data = (void *)ATH11K_PM_WOW, - .matches = { + .matches = { /* T14s G4 AMD #1 */ + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21F8"), + }, + }, + { + .driver_data = (void *)ATH11K_PM_WOW, + .matches = { /* T14s G4 AMD #2 */ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "21F9"), }, diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 106e2530b64e..0e41b5a91d66 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
*/ #include @@ -4417,9 +4417,9 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) - flags |= WMI_KEY_PAIRWISE; + flags = WMI_KEY_PAIRWISE; else - flags |= WMI_KEY_GROUP; + flags = WMI_KEY_GROUP; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n", @@ -4456,7 +4456,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP && !arvif->num_stations); - if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) { + if (flags == WMI_KEY_PAIRWISE || cmd == SET_KEY || is_ap_with_no_sta) { ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags); if (ret) { ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret); @@ -4470,7 +4470,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, goto exit; } - if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta) + if (flags == WMI_KEY_GROUP && cmd == SET_KEY && is_ap_with_no_sta) arvif->reinstall_group_keys = true; } diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 1d7b60aa5cb0..eacab798630a 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -8290,23 +8290,32 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb) wake_up(&ar->txmgmt_empty_waitq); } -int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) +static void ath12k_mac_tx_mgmt_free(struct ath12k *ar, int buf_id) { - struct sk_buff *msdu = skb; + struct sk_buff *msdu; struct ieee80211_tx_info *info; - struct ath12k *ar = ctx; - struct ath12k_base *ab = ar->ab; spin_lock_bh(&ar->txmgmt_idr_lock); - idr_remove(&ar->txmgmt_idr, buf_id); + msdu = idr_remove(&ar->txmgmt_idr, buf_id); spin_unlock_bh(&ar->txmgmt_idr_lock); - dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len, + + if (!msdu) + return; + + dma_unmap_single(ar->ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len, DMA_TO_DEVICE); info = IEEE80211_SKB_CB(msdu); memset(&info->status, 0, sizeof(info->status)); - ath12k_mgmt_over_wmi_tx_drop(ar, skb); + ath12k_mgmt_over_wmi_tx_drop(ar, msdu); +} + +int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx) +{ + struct ath12k *ar = ctx; + + ath12k_mac_tx_mgmt_free(ar, buf_id); return 0; } @@ -8315,17 +8324,10 @@ static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx) { struct ieee80211_vif *vif = ctx; struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); - struct sk_buff *msdu = skb; struct ath12k *ar = skb_cb->ar; - struct ath12k_base *ab = ar->ab; - if (skb_cb->vif == vif) { - spin_lock_bh(&ar->txmgmt_idr_lock); - idr_remove(&ar->txmgmt_idr, buf_id); - spin_unlock_bh(&ar->txmgmt_idr_lock); - dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, - DMA_TO_DEVICE); - } + if (skb_cb->vif == vif) + ath12k_mac_tx_mgmt_free(ar, buf_id); return 0; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 8afaffe31031..bb96b87b2a6e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5627,8 +5627,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, *cookie, le16_to_cpu(action_frame->len), le32_to_cpu(af_params->channel)); - ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg), 
- af_params); + ack = brcmf_p2p_send_action_frame(vif->ifp, af_params); cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack, GFP_KERNEL); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 0dc9d28cd77b..e1752a513c73 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -1529,6 +1529,7 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, /** * brcmf_p2p_tx_action_frame() - send action frame over fil. * + * @ifp: interface to transmit on. * @p2p: p2p info struct for vif. * @af_params: action frame data/info. * @@ -1538,12 +1539,11 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action * frame is transmitted. */ -static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, +static s32 brcmf_p2p_tx_action_frame(struct brcmf_if *ifp, + struct brcmf_p2p_info *p2p, struct brcmf_fil_af_params_le *af_params) { struct brcmf_pub *drvr = p2p->cfg->pub; - struct brcmf_cfg80211_vif *vif; - struct brcmf_p2p_action_frame *p2p_af; s32 err = 0; brcmf_dbg(TRACE, "Enter\n"); @@ -1552,14 +1552,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p, clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status); clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status); - /* check if it is a p2p_presence response */ - p2p_af = (struct brcmf_p2p_action_frame *)af_params->action_frame.data; - if (p2p_af->subtype == P2P_AF_PRESENCE_RSP) - vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif; - else - vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif; - - err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params, + err = brcmf_fil_bsscfg_data_set(ifp, "actframe", af_params, sizeof(*af_params)); if (err) { bphy_err(drvr, " sending action frame has failed\n"); @@ -1711,16 +1704,14 @@ static bool brcmf_p2p_check_dwell_overflow(u32 requested_dwell, /** * brcmf_p2p_send_action_frame() - send action frame . * - * @cfg: driver private data for cfg80211 interface. - * @ndev: net device to transmit on. + * @ifp: interface to transmit on. * @af_params: configuration data for action frame. 
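+ * + * Return: true if the action frame was acked, false otherwise.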
*/ -bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, - struct net_device *ndev, +bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp, struct brcmf_fil_af_params_le *af_params) { + struct brcmf_cfg80211_info *cfg = ifp->drvr->config; struct brcmf_p2p_info *p2p = &cfg->p2p; - struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_fil_action_frame_le *action_frame; struct brcmf_config_af_params config_af_params; struct afx_hdl *afx_hdl = &p2p->afx_hdl; @@ -1857,7 +1848,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, if (af_params->channel) msleep(P2P_AF_RETRY_DELAY_TIME); - ack = !brcmf_p2p_tx_action_frame(p2p, af_params); + ack = !brcmf_p2p_tx_action_frame(ifp, p2p, af_params); tx_retry++; dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell, dwell_jiffies); @@ -2217,7 +2208,6 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, WARN_ON(p2p_ifp->bsscfgidx != bsscfgidx); - init_completion(&p2p->send_af_done); INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler); init_completion(&p2p->afx_hdl.act_frm_scan); init_completion(&p2p->wait_next_af); @@ -2513,6 +2503,8 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced) pri_ifp = brcmf_get_ifp(cfg->pub, 0); p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif; + init_completion(&p2p->send_af_done); + if (p2pdev_forced) { err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL); if (IS_ERR(err_ptr)) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h index d2ecee565bf2..d3137ebd7158 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.h @@ -168,8 +168,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp, const struct brcmf_event_msg *e, void *data); -bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg, - struct net_device *ndev, +bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp, struct brcmf_fil_af_params_le *af_params); bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg, struct brcmf_bss_info_le *bi); diff --git a/drivers/net/wireless/intel/iwlwifi/mld/link.c b/drivers/net/wireless/intel/iwlwifi/mld/link.c index 738f80fe0c50..60d814bf5779 100644 --- a/drivers/net/wireless/intel/iwlwifi/mld/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mld/link.c @@ -501,6 +501,7 @@ void iwl_mld_remove_link(struct iwl_mld *mld, struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif); struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf); bool is_deflink = link == &mld_vif->deflink; + u8 fw_id = link->fw_id; if (WARN_ON(!link || link->active)) return; @@ -513,10 +514,10 @@ void iwl_mld_remove_link(struct iwl_mld *mld, RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL); - if (WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links)) + if (WARN_ON(fw_id >= mld->fw->ucode_capa.num_links)) return; - RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL); + RCU_INIT_POINTER(mld->fw_id_to_bss_conf[fw_id], NULL); } void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld, diff --git a/drivers/nvmem/rcar-efuse.c b/drivers/nvmem/rcar-efuse.c index f24bdb9cb5a7..d9a96a1d59c8 100644 --- a/drivers/nvmem/rcar-efuse.c +++ b/drivers/nvmem/rcar-efuse.c @@ -127,6 +127,7 @@ static const struct of_device_id rcar_fuse_match[] = { { .compatible = "renesas,r8a779h0-otp", .data = 
&rcar_fuse_v4m }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, rcar_fuse_match); static struct platform_driver rcar_fuse_driver = { .probe = rcar_fuse_probe, diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 65c3c23255b7..1cd93549d093 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -671,6 +671,36 @@ void __init of_irq_init(const struct of_device_id *matches) } } +static int of_check_msi_parent(struct device_node *dev_node, struct device_node **msi_node) +{ + struct of_phandle_args msi_spec; + int ret; + + /* + * An msi-parent phandle with a missing or == 0 #msi-cells + * property identifies a 1:1 ID translation mapping. + * + * Set the msi controller node if the firmware matches this + * condition. + */ + ret = of_parse_phandle_with_optional_args(dev_node, "msi-parent", "#msi-cells", + 0, &msi_spec); + if (ret) + return ret; + + if ((*msi_node && *msi_node != msi_spec.np) || msi_spec.args_count != 0) + ret = -EINVAL; + + if (!ret) { + /* Return with a node reference held */ + *msi_node = msi_spec.np; + return 0; + } + of_node_put(msi_spec.np); + + return ret; +} + /** * of_msi_xlate - map a MSI ID and find relevant MSI controller node * @dev: device for which the mapping is to be done. @@ -678,7 +708,7 @@ void __init of_irq_init(const struct of_device_id *matches) * @id_in: Device ID. * * Walk up the device hierarchy looking for devices with a "msi-map" - * property. If found, apply the mapping to @id_in. + * or "msi-parent" property. If found, apply the mapping to @id_in. * If @msi_np points to a non-NULL device node pointer, only entries targeting * that node will be matched; if it points to a NULL value, it will receive the * device node of the first matching target phandle, with a reference held. @@ -692,14 +722,18 @@ u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in) /* * Walk up the device parent links looking for one with a - * "msi-map" property. + * "msi-map" or an "msi-parent" property. 
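+ * At each level the "msi-map" lookup is tried first; "msi-parent" is + * only consulted when no msi-map entry matches.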
*/ - for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) + for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent) { if (!of_map_id(parent_dev->of_node, id_in, "msi-map", "msi-map-mask", msi_np, &id_out)) break; + if (!of_check_msi_parent(parent_dev->of_node, msi_np)) + break; + } return id_out; } +EXPORT_SYMBOL_GPL(of_msi_xlate); /** * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain @@ -741,8 +775,10 @@ struct irq_domain *of_msi_get_domain(struct device *dev, of_for_each_phandle(&it, err, np, "msi-parent", "#msi-cells", 0) { d = irq_find_matching_host(it.node, token); - if (d) + if (d) { + of_node_put(it.node); return d; + } } return NULL; diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 20c9333bcb1c..e92513c5bda5 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -23,6 +23,7 @@ #include "pcie-designware.h" static struct pci_ops dw_pcie_ops; +static struct pci_ops dw_pcie_ecam_ops; static struct pci_ops dw_child_pcie_ops; #define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS | \ @@ -471,9 +472,6 @@ static int dw_pcie_create_ecam_window(struct dw_pcie_rp *pp, struct resource *re if (IS_ERR(pp->cfg)) return PTR_ERR(pp->cfg); - pci->dbi_base = pp->cfg->win; - pci->dbi_phys_addr = res->start; - return 0; } @@ -529,7 +527,7 @@ static int dw_pcie_host_get_resources(struct dw_pcie_rp *pp) if (ret) return ret; - pp->bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops; + pp->bridge->ops = &dw_pcie_ecam_ops; pp->bridge->sysdata = pp->cfg; pp->cfg->priv = pp; } else { @@ -842,12 +840,34 @@ void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn, } EXPORT_SYMBOL_GPL(dw_pcie_own_conf_map_bus); +static void __iomem *dw_pcie_ecam_conf_map_bus(struct pci_bus *bus, unsigned int devfn, int where) +{ + struct pci_config_window *cfg = bus->sysdata; + struct dw_pcie_rp *pp = cfg->priv; + struct dw_pcie *pci = to_dw_pcie_from_pp(pp); + unsigned int busn = bus->number; + + if (busn > 0) + return pci_ecam_map_bus(bus, devfn, where); + + if (PCI_SLOT(devfn) > 0) + return NULL; + + return pci->dbi_base + where; +} + static struct pci_ops dw_pcie_ops = { .map_bus = dw_pcie_own_conf_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, }; +static struct pci_ops dw_pcie_ecam_ops = { + .map_bus = dw_pcie_ecam_conf_map_bus, + .read = pci_generic_config_read, + .write = pci_generic_config_write, +}; + static int dw_pcie_iatu_setup(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 805edbbfe7eb..6948824642dc 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -55,7 +55,6 @@ #define PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1a8 #define PARF_Q2A_FLUSH 0x1ac #define PARF_LTSSM 0x1b0 -#define PARF_SLV_DBI_ELBI 0x1b4 #define PARF_INT_ALL_STATUS 0x224 #define PARF_INT_ALL_CLEAR 0x228 #define PARF_INT_ALL_MASK 0x22c @@ -65,16 +64,6 @@ #define PARF_DBI_BASE_ADDR_V2_HI 0x354 #define PARF_SLV_ADDR_SPACE_SIZE_V2 0x358 #define PARF_SLV_ADDR_SPACE_SIZE_V2_HI 0x35c -#define PARF_BLOCK_SLV_AXI_WR_BASE 0x360 -#define PARF_BLOCK_SLV_AXI_WR_BASE_HI 0x364 -#define PARF_BLOCK_SLV_AXI_WR_LIMIT 0x368 -#define PARF_BLOCK_SLV_AXI_WR_LIMIT_HI 0x36c -#define PARF_BLOCK_SLV_AXI_RD_BASE 0x370 -#define PARF_BLOCK_SLV_AXI_RD_BASE_HI 0x374 
-#define PARF_BLOCK_SLV_AXI_RD_LIMIT 0x378 -#define PARF_BLOCK_SLV_AXI_RD_LIMIT_HI 0x37c -#define PARF_ECAM_BASE 0x380 -#define PARF_ECAM_BASE_HI 0x384 #define PARF_NO_SNOOP_OVERRIDE 0x3d4 #define PARF_ATU_BASE_ADDR 0x634 #define PARF_ATU_BASE_ADDR_HI 0x638 @@ -98,7 +87,6 @@ /* PARF_SYS_CTRL register fields */ #define MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN BIT(29) -#define PCIE_ECAM_BLOCKER_EN BIT(26) #define MST_WAKEUP_EN BIT(13) #define SLV_WAKEUP_EN BIT(12) #define MSTR_ACLK_CGC_DIS BIT(10) @@ -146,9 +134,6 @@ /* PARF_LTSSM register fields */ #define LTSSM_EN BIT(8) -/* PARF_SLV_DBI_ELBI */ -#define SLV_DBI_ELBI_ADDR_BASE GENMASK(11, 0) - /* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */ #define PARF_INT_ALL_LINK_UP BIT(13) #define PARF_INT_MSI_DEV_0_7 GENMASK(30, 23) @@ -326,47 +311,6 @@ static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) qcom_perst_assert(pcie, false); } -static void qcom_pci_config_ecam(struct dw_pcie_rp *pp) -{ - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - struct qcom_pcie *pcie = to_qcom_pcie(pci); - u64 addr, addr_end; - u32 val; - - writel_relaxed(lower_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE); - writel_relaxed(upper_32_bits(pci->dbi_phys_addr), pcie->parf + PARF_ECAM_BASE_HI); - - /* - * The only device on the root bus is a single Root Port. If we try to - * access any devices other than Device/Function 00.0 on Bus 0, the TLP - * will go outside of the controller to the PCI bus. But with CFG Shift - * Feature (ECAM) enabled in iATU, there is no guarantee that the - * response is going to be all F's. Hence, to make sure that the - * requester gets all F's response for accesses other than the Root - * Port, configure iATU to block the transactions starting from - * function 1 of the root bus to the end of the root bus (i.e., from - * dbi_base + 4KB to dbi_base + 1MB). - */ - addr = pci->dbi_phys_addr + SZ_4K; - writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE); - writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_WR_BASE_HI); - - writel_relaxed(lower_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE); - writel_relaxed(upper_32_bits(addr), pcie->parf + PARF_BLOCK_SLV_AXI_RD_BASE_HI); - - addr_end = pci->dbi_phys_addr + SZ_1M - 1; - - writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT); - writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_WR_LIMIT_HI); - - writel_relaxed(lower_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT); - writel_relaxed(upper_32_bits(addr_end), pcie->parf + PARF_BLOCK_SLV_AXI_RD_LIMIT_HI); - - val = readl_relaxed(pcie->parf + PARF_SYS_CTRL); - val |= PCIE_ECAM_BLOCKER_EN; - writel_relaxed(val, pcie->parf + PARF_SYS_CTRL); -} - static int qcom_pcie_start_link(struct dw_pcie *pci) { struct qcom_pcie *pcie = to_qcom_pcie(pci); @@ -1320,7 +1264,6 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp) { struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct qcom_pcie *pcie = to_qcom_pcie(pci); - u16 offset; int ret; qcom_ep_reset_assert(pcie); @@ -1329,17 +1272,6 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp) if (ret) return ret; - if (pp->ecam_enabled) { - /* - * Override ELBI when ECAM is enabled, as when ECAM is enabled, - * ELBI moves under the 'config' space. 
- */ - offset = FIELD_GET(SLV_DBI_ELBI_ADDR_BASE, readl(pcie->parf + PARF_SLV_DBI_ELBI)); - pci->elbi_base = pci->dbi_base + offset; - - qcom_pci_config_ecam(pp); - } - ret = qcom_pcie_phy_power_on(pcie); if (ret) goto err_deinit; diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 7cc8281e7011..79b965158473 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -243,8 +243,7 @@ struct pcie_link_state { /* Clock PM state */ u32 clkpm_capable:1; /* Clock PM capable? */ u32 clkpm_enabled:1; /* Current Clock PM state */ - u32 clkpm_default:1; /* Default Clock PM state by BIOS or - override */ + u32 clkpm_default:1; /* Default Clock PM state by BIOS */ u32 clkpm_disable:1; /* Clock PM disabled */ }; @@ -376,18 +375,6 @@ static void pcie_set_clkpm(struct pcie_link_state *link, int enable) pcie_set_clkpm_nocheck(link, enable); } -static void pcie_clkpm_override_default_link_state(struct pcie_link_state *link, - int enabled) -{ - struct pci_dev *pdev = link->downstream; - - /* For devicetree platforms, enable ClockPM by default */ - if (of_have_populated_dt() && !enabled) { - link->clkpm_default = 1; - pci_info(pdev, "ASPM: DT platform, enabling ClockPM\n"); - } -} - static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) { int capable = 1, enabled = 1; @@ -410,7 +397,6 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) } link->clkpm_enabled = enabled; link->clkpm_default = enabled; - pcie_clkpm_override_default_link_state(link, enabled); link->clkpm_capable = capable; link->clkpm_disable = blacklist ? 1 : 0; } @@ -811,19 +797,17 @@ static void pcie_aspm_override_default_link_state(struct pcie_link_state *link) struct pci_dev *pdev = link->downstream; u32 override; - /* For devicetree platforms, enable all ASPM states by default */ + /* For devicetree platforms, enable L0s and L1 by default */ if (of_have_populated_dt()) { - link->aspm_default = PCIE_LINK_STATE_ASPM_ALL; + if (link->aspm_support & PCIE_LINK_STATE_L0S) + link->aspm_default |= PCIE_LINK_STATE_L0S; + if (link->aspm_support & PCIE_LINK_STATE_L1) + link->aspm_default |= PCIE_LINK_STATE_L1; override = link->aspm_default & ~link->aspm_enabled; if (override) - pci_info(pdev, "ASPM: DT platform, enabling%s%s%s%s%s%s%s\n", - FLAG(override, L0S_UP, " L0s-up"), - FLAG(override, L0S_DW, " L0s-dw"), - FLAG(override, L1, " L1"), - FLAG(override, L1_1, " ASPM-L1.1"), - FLAG(override, L1_2, " ASPM-L1.2"), - FLAG(override, L1_1_PCIPM, " PCI-PM-L1.1"), - FLAG(override, L1_2_PCIPM, " PCI-PM-L1.2")); + pci_info(pdev, "ASPM: default states%s%s\n", + FLAG(override, L0S, " L0s"), + FLAG(override, L1, " L1")); } } diff --git a/drivers/spi/spi-airoha-snfi.c b/drivers/spi/spi-airoha-snfi.c index dbe640986825..b78163eaed61 100644 --- a/drivers/spi/spi-airoha-snfi.c +++ b/drivers/spi/spi-airoha-snfi.c @@ -192,6 +192,14 @@ #define SPI_NAND_OP_RESET 0xff #define SPI_NAND_OP_DIE_SELECT 0xc2 +/* SNAND FIFO commands */ +#define SNAND_FIFO_TX_BUSWIDTH_SINGLE 0x08 +#define SNAND_FIFO_TX_BUSWIDTH_DUAL 0x09 +#define SNAND_FIFO_TX_BUSWIDTH_QUAD 0x0a +#define SNAND_FIFO_RX_BUSWIDTH_SINGLE 0x0c +#define SNAND_FIFO_RX_BUSWIDTH_DUAL 0x0e +#define SNAND_FIFO_RX_BUSWIDTH_QUAD 0x0f + #define SPI_NAND_CACHE_SIZE (SZ_4K + SZ_256) #define SPI_MAX_TRANSFER_SIZE 511 @@ -387,10 +395,26 @@ static int airoha_snand_set_mode(struct airoha_snand_ctrl *as_ctrl, return regmap_write(as_ctrl->regmap_ctrl, REG_SPI_CTRL_DUMMY, 0); } -static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd, 
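The pcie/aspm.c hunk above narrows the devicetree default from "all ASPM states" to just L0s and L1, and only when the link actually advertises them. A standalone sketch of the mask arithmetic, with illustrative bit values rather than the kernel's real defines:

#include <stdio.h>

#define PCIE_LINK_STATE_L0S	0x3	/* upstream + downstream L0s */
#define PCIE_LINK_STATE_L1	0x4

/* Build the default mask from what the link supports, then report
 * only the states the override actually turns on. */
static unsigned int aspm_default(unsigned int support, unsigned int enabled)
{
	unsigned int def = 0;

	if (support & PCIE_LINK_STATE_L0S)
		def |= PCIE_LINK_STATE_L0S;
	if (support & PCIE_LINK_STATE_L1)
		def |= PCIE_LINK_STATE_L1;

	return def & ~enabled;	/* states newly enabled by the override */
}

int main(void)
{
	/* link supports L1 only and has nothing enabled yet */
	unsigned int override = aspm_default(PCIE_LINK_STATE_L1, 0);

	printf("override mask: %#x\n", override);	/* 0x4: just L1 */
	return 0;
}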
- const u8 *data, int len) +static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, + const u8 *data, int len, int buswidth) { int i, data_len; + u8 cmd; + + switch (buswidth) { + case 0: + case 1: + cmd = SNAND_FIFO_TX_BUSWIDTH_SINGLE; + break; + case 2: + cmd = SNAND_FIFO_TX_BUSWIDTH_DUAL; + break; + case 4: + cmd = SNAND_FIFO_TX_BUSWIDTH_QUAD; + break; + default: + return -EINVAL; + } for (i = 0; i < len; i += data_len) { int err; @@ -409,16 +433,32 @@ static int airoha_snand_write_data(struct airoha_snand_ctrl *as_ctrl, u8 cmd, return 0; } -static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, u8 *data, - int len) +static int airoha_snand_read_data(struct airoha_snand_ctrl *as_ctrl, + u8 *data, int len, int buswidth) { int i, data_len; + u8 cmd; + + switch (buswidth) { + case 0: + case 1: + cmd = SNAND_FIFO_RX_BUSWIDTH_SINGLE; + break; + case 2: + cmd = SNAND_FIFO_RX_BUSWIDTH_DUAL; + break; + case 4: + cmd = SNAND_FIFO_RX_BUSWIDTH_QUAD; + break; + default: + return -EINVAL; + } for (i = 0; i < len; i += data_len) { int err; data_len = min(len - i, SPI_MAX_TRANSFER_SIZE); - err = airoha_snand_set_fifo_op(as_ctrl, 0xc, data_len); + err = airoha_snand_set_fifo_op(as_ctrl, cmd, data_len); if (err) return err; @@ -618,6 +658,10 @@ static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc) if (desc->info.offset + desc->info.length > U32_MAX) return -EINVAL; + /* continuous reading is not supported */ + if (desc->info.length > SPI_NAND_CACHE_SIZE) + return -E2BIG; + if (!airoha_snand_supports_op(desc->mem, &desc->info.op_tmpl)) return -EOPNOTSUPP; @@ -654,13 +698,13 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc, err = airoha_snand_nfi_config(as_ctrl); if (err) - return err; + goto error_dma_mode_off; dma_addr = dma_map_single(as_ctrl->dev, txrx_buf, SPI_NAND_CACHE_SIZE, DMA_FROM_DEVICE); err = dma_mapping_error(as_ctrl->dev, dma_addr); if (err) - return err; + goto error_dma_mode_off; /* set dma addr */ err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_STRADDR, @@ -689,8 +733,9 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc, if (err) goto error_dma_unmap; - /* set read addr */ - err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, 0x0); + /* set read addr: zero page offset + descriptor read offset */ + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_RD_CTL3, + desc->info.offset); if (err) goto error_dma_unmap; @@ -760,6 +805,8 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc, error_dma_unmap: dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE, DMA_FROM_DEVICE); +error_dma_mode_off: + airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); return err; } @@ -824,7 +871,9 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, if (err) goto error_dma_unmap; - err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, 0x0); + /* set write addr: zero page offset + descriptor write offset */ + err = regmap_write(as_ctrl->regmap_nfi, REG_SPI_NFI_PG_CTL2, + desc->info.offset); if (err) goto error_dma_unmap; @@ -892,18 +941,35 @@ static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc, error_dma_unmap: dma_unmap_single(as_ctrl->dev, dma_addr, SPI_NAND_CACHE_SIZE, DMA_TO_DEVICE); + airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); return err; } static int airoha_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) { - u8 data[8], cmd, opcode = op->cmd.opcode; struct airoha_snand_ctrl *as_ctrl; + int op_len, 
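Both airoha write/read hunks replace a hard-coded FIFO opcode with a lookup keyed on the per-phase bus width; width 0 is treated as single I/O and anything other than 1/2/4 is rejected. A compile-checkable sketch of the TX mapping (opcode values copied from the defines above):

#include <errno.h>
#include <stdio.h>

#define SNAND_FIFO_TX_BUSWIDTH_SINGLE	0x08
#define SNAND_FIFO_TX_BUSWIDTH_DUAL	0x09
#define SNAND_FIFO_TX_BUSWIDTH_QUAD	0x0a

/* Map a spi-mem phase bus width to the controller's FIFO command.
 * Width 0 means "unspecified" and falls back to single I/O. */
static int tx_fifo_cmd(int buswidth)
{
	switch (buswidth) {
	case 0:
	case 1:
		return SNAND_FIFO_TX_BUSWIDTH_SINGLE;
	case 2:
		return SNAND_FIFO_TX_BUSWIDTH_DUAL;
	case 4:
		return SNAND_FIFO_TX_BUSWIDTH_QUAD;
	default:
		return -EINVAL;
	}
}

int main(void)
{
	for (int w = 0; w <= 4; w++)
		printf("buswidth %d -> cmd %d\n", w, tx_fifo_cmd(w));
	return 0;
}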
addr_len, dummy_len; + u8 buf[20], *data; int i, err; as_ctrl = spi_controller_get_devdata(mem->spi->controller); + op_len = op->cmd.nbytes; + addr_len = op->addr.nbytes; + dummy_len = op->dummy.nbytes; + + if (op_len + dummy_len + addr_len > sizeof(buf)) + return -EIO; + + data = buf; + for (i = 0; i < op_len; i++) + *data++ = op->cmd.opcode >> (8 * (op_len - i - 1)); + for (i = 0; i < addr_len; i++) + *data++ = op->addr.val >> (8 * (addr_len - i - 1)); + for (i = 0; i < dummy_len; i++) + *data++ = 0xff; + /* switch to manual mode */ err = airoha_snand_set_mode(as_ctrl, SPI_MODE_MANUAL); if (err < 0) @@ -914,40 +980,40 @@ static int airoha_snand_exec_op(struct spi_mem *mem, return err; /* opcode */ - err = airoha_snand_write_data(as_ctrl, 0x8, &opcode, sizeof(opcode)); + data = buf; + err = airoha_snand_write_data(as_ctrl, data, op_len, + op->cmd.buswidth); if (err) return err; /* addr part */ - cmd = opcode == SPI_NAND_OP_GET_FEATURE ? 0x11 : 0x8; - put_unaligned_be64(op->addr.val, data); - - for (i = ARRAY_SIZE(data) - op->addr.nbytes; - i < ARRAY_SIZE(data); i++) { - err = airoha_snand_write_data(as_ctrl, cmd, &data[i], - sizeof(data[0])); + data += op_len; + if (addr_len) { + err = airoha_snand_write_data(as_ctrl, data, addr_len, + op->addr.buswidth); if (err) return err; } /* dummy */ - data[0] = 0xff; - for (i = 0; i < op->dummy.nbytes; i++) { - err = airoha_snand_write_data(as_ctrl, 0x8, &data[0], - sizeof(data[0])); + data += addr_len; + if (dummy_len) { + err = airoha_snand_write_data(as_ctrl, data, dummy_len, + op->dummy.buswidth); if (err) return err; } /* data */ - if (op->data.dir == SPI_MEM_DATA_IN) { - err = airoha_snand_read_data(as_ctrl, op->data.buf.in, - op->data.nbytes); - if (err) - return err; - } else { - err = airoha_snand_write_data(as_ctrl, 0x8, op->data.buf.out, - op->data.nbytes); + if (op->data.nbytes) { + if (op->data.dir == SPI_MEM_DATA_IN) + err = airoha_snand_read_data(as_ctrl, op->data.buf.in, + op->data.nbytes, + op->data.buswidth); + else + err = airoha_snand_write_data(as_ctrl, op->data.buf.out, + op->data.nbytes, + op->data.buswidth); if (err) return err; } diff --git a/drivers/spi/spi-amlogic-spifc-a4.c b/drivers/spi/spi-amlogic-spifc-a4.c index 4338d00e56a6..35a7c4965e11 100644 --- a/drivers/spi/spi-amlogic-spifc-a4.c +++ b/drivers/spi/spi-amlogic-spifc-a4.c @@ -286,7 +286,7 @@ static int aml_sfc_set_bus_width(struct aml_sfc *sfc, u8 buswidth, u32 mask) for (i = 0; i <= LANE_MAX; i++) { if (buswidth == 1 << i) { - conf = i << __bf_shf(mask); + conf = i << __ffs(mask); return regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, mask, conf); } @@ -566,7 +566,7 @@ static int aml_sfc_raw_io_op(struct aml_sfc *sfc, const struct spi_mem_op *op) if (!op->data.nbytes) goto end_xfer; - conf = (op->data.nbytes >> RAW_SIZE_BW) << __bf_shf(RAW_EXT_SIZE); + conf = (op->data.nbytes >> RAW_SIZE_BW) << __ffs(RAW_EXT_SIZE); ret = regmap_update_bits(sfc->regmap_base, SFC_SPI_CFG, RAW_EXT_SIZE, conf); if (ret) goto err_out; diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 8fb13df8ff87..81017402bc56 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1995,7 +1995,7 @@ static int cqspi_probe(struct platform_device *pdev) if (cqspi->use_direct_mode) { ret = cqspi_request_mmap_dma(cqspi); if (ret == -EPROBE_DEFER) - goto probe_setup_failed; + goto probe_dma_failed; } if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) { @@ -2019,9 +2019,10 @@ static int cqspi_probe(struct platform_device 
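The rewritten exec_op above packs opcode, address, and dummy bytes into one stack buffer, most significant byte first, after checking the total fits. A self-contained sketch of the same MSB-first serialization with a bounds check (helper name is made up):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Serialize an N-byte value MSB-first into dst, the way the new
 * exec_op packs opcode and address bytes into one buffer. */
static int pack_be(uint8_t *dst, size_t room, uint64_t val, int nbytes)
{
	if ((size_t)nbytes > room)
		return -EIO;
	for (int i = 0; i < nbytes; i++)
		dst[i] = val >> (8 * (nbytes - i - 1));
	return nbytes;
}

int main(void)
{
	uint8_t buf[20];
	int n = pack_be(buf, sizeof(buf), 0x112233, 3);

	for (int i = 0; i < n; i++)
		printf("%02x ", buf[i]);	/* 11 22 33 */
	printf("\n");
	return 0;
}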
*pdev) return 0; probe_setup_failed: - cqspi_controller_enable(cqspi, 0); if (!(ddata && (ddata->quirks & CQSPI_DISABLE_RUNTIME_PM))) pm_runtime_disable(dev); +probe_dma_failed: + cqspi_controller_enable(cqspi, 0); probe_reset_failed: if (cqspi->is_jh7110) cqspi_jh7110_disable_clk(pdev, cqspi); diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index f0f576fac77a..7a5197586919 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -358,7 +358,9 @@ static int dw_spi_mmio_probe(struct platform_device *pdev) if (IS_ERR(dwsmmio->rstc)) return PTR_ERR(dwsmmio->rstc); - reset_control_deassert(dwsmmio->rstc); + ret = reset_control_deassert(dwsmmio->rstc); + if (ret) + return dev_err_probe(&pdev->dev, ret, "Failed to deassert resets\n"); dws->bus_num = pdev->id; diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c index 4b63cb98df9c..7765fb27c37c 100644 --- a/drivers/spi/spi-intel-pci.c +++ b/drivers/spi/spi-intel-pci.c @@ -75,10 +75,12 @@ static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x38a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x4d23), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x51a4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x54a4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x5794), (unsigned long)&cnl_info }, + { PCI_VDEVICE(INTEL, 0x7723), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info }, diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c index 13bbb2133507..1775ad39e633 100644 --- a/drivers/spi/spi-intel.c +++ b/drivers/spi/spi-intel.c @@ -132,6 +132,7 @@ #define FLCOMP_C0DEN_16M 0x05 #define FLCOMP_C0DEN_32M 0x06 #define FLCOMP_C0DEN_64M 0x07 +#define FLCOMP_C0DEN_128M 0x08 #define INTEL_SPI_TIMEOUT 5000 /* ms */ #define INTEL_SPI_FIFO_SZ 64 @@ -1347,7 +1348,12 @@ static int intel_spi_read_desc(struct intel_spi *ispi) case FLCOMP_C0DEN_64M: ispi->chip0_size = SZ_64M; break; + case FLCOMP_C0DEN_128M: + ispi->chip0_size = SZ_128M; + break; default: + dev_warn(ispi->dev, "unsupported C0DEN: %#lx\n", + flcomp & FLCOMP_C0DEN_MASK); return -EINVAL; } diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c index f9371f98a65b..b6c79e50d842 100644 --- a/drivers/spi/spi-nxp-fspi.c +++ b/drivers/spi/spi-nxp-fspi.c @@ -404,6 +404,10 @@ struct nxp_fspi { #define FSPI_NEED_INIT BIT(0) #define FSPI_DTR_MODE BIT(1) int flags; + /* save the previous operation clock rate */ + unsigned long pre_op_rate; + /* the max clock rate fspi output to device */ + unsigned long max_rate; }; static inline int needs_ip_only(struct nxp_fspi *f) @@ -685,10 +689,13 @@ static void nxp_fspi_select_rx_sample_clk_source(struct nxp_fspi *f, * change the mode back to mode 0. 
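The spi-intel hunk above extends the flash-component density decode to 128 Mbit parts and makes the failure mode loggable instead of a silent -EINVAL. A minimal model of that decode; the field mask value here is illustrative, not the driver's:

#include <stdio.h>

#define FLCOMP_C0DEN_MASK	0x0f	/* illustrative field mask */
#define FLCOMP_C0DEN_64M	0x07
#define FLCOMP_C0DEN_128M	0x08

/* Decode the component-0 density field into a size in bytes;
 * 0 stands in for the driver's -EINVAL. */
static unsigned long chip0_size(unsigned long flcomp)
{
	switch (flcomp & FLCOMP_C0DEN_MASK) {
	case FLCOMP_C0DEN_64M:
		return 64ul << 20;
	case FLCOMP_C0DEN_128M:
		return 128ul << 20;
	default:
		fprintf(stderr, "unsupported C0DEN: %#lx\n",
			flcomp & FLCOMP_C0DEN_MASK);
		return 0;
	}
}

int main(void)
{
	printf("C0DEN 0x08 -> %lu MiB\n", chip0_size(0x08) >> 20);
	return 0;
}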
*/ reg = fspi_readl(f, f->iobase + FSPI_MCR0); - if (op_is_dtr) + if (op_is_dtr) { reg |= FSPI_MCR0_RXCLKSRC(3); - else /*select mode 0 */ + f->max_rate = 166000000; + } else { /*select mode 0 */ reg &= ~FSPI_MCR0_RXCLKSRC(3); + f->max_rate = 66000000; + } fspi_writel(f, reg, f->iobase + FSPI_MCR0); } @@ -719,6 +726,12 @@ static void nxp_fspi_dll_calibration(struct nxp_fspi *f) 0, POLL_TOUT, true); if (ret) dev_warn(f->dev, "DLL lock failed, please fix it!\n"); + + /* + * For ERR050272, DLL lock status bit is not accurate, + * wait for 4us more as a workaround. + */ + udelay(4); } /* @@ -780,11 +793,17 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi, uint64_t size_kb; /* - * Return, if previously selected target device is same as current - * requested target device. Also the DTR or STR mode do not change. + * Return when following condition all meet, + * 1, if previously selected target device is same as current + * requested target device. + * 2, the DTR or STR mode do not change. + * 3, previous operation max rate equals current one. + * + * For other case, need to re-config. */ if ((f->selected == spi_get_chipselect(spi, 0)) && - (!!(f->flags & FSPI_DTR_MODE) == op_is_dtr)) + (!!(f->flags & FSPI_DTR_MODE) == op_is_dtr) && + (f->pre_op_rate == op->max_freq)) return; /* Reset FLSHxxCR0 registers */ @@ -802,6 +821,7 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi, dev_dbg(f->dev, "Target device [CS:%x] selected\n", spi_get_chipselect(spi, 0)); nxp_fspi_select_rx_sample_clk_source(f, op_is_dtr); + rate = min(f->max_rate, op->max_freq); if (op_is_dtr) { f->flags |= FSPI_DTR_MODE; @@ -832,6 +852,8 @@ static void nxp_fspi_select_mem(struct nxp_fspi *f, struct spi_device *spi, else nxp_fspi_dll_override(f); + f->pre_op_rate = op->max_freq; + f->selected = spi_get_chipselect(spi, 0); } diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c index 9eba5c0a60f2..b3c2b03b1153 100644 --- a/drivers/spi/spi-rockchip-sfc.c +++ b/drivers/spi/spi-rockchip-sfc.c @@ -704,7 +704,12 @@ static int rockchip_sfc_probe(struct platform_device *pdev) ret = -ENOMEM; goto err_dma; } - sfc->dma_buffer = virt_to_phys(sfc->buffer); + sfc->dma_buffer = dma_map_single(dev, sfc->buffer, + sfc->max_iosize, DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev, sfc->dma_buffer)) { + ret = -ENOMEM; + goto err_dma_map; + } } ret = devm_spi_register_controller(dev, host); @@ -715,6 +720,9 @@ static int rockchip_sfc_probe(struct platform_device *pdev) return 0; err_register: + dma_unmap_single(dev, sfc->dma_buffer, sfc->max_iosize, + DMA_BIDIRECTIONAL); +err_dma_map: free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize)); err_dma: pm_runtime_get_sync(dev); @@ -736,6 +744,8 @@ static void rockchip_sfc_remove(struct platform_device *pdev) struct spi_controller *host = sfc->host; spi_unregister_controller(host); + dma_unmap_single(&pdev->dev, sfc->dma_buffer, sfc->max_iosize, + DMA_BIDIRECTIONAL); free_pages((unsigned long)sfc->buffer, get_order(sfc->max_iosize)); clk_disable_unprepare(sfc->clk); diff --git a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c index 94bbb3b6576d..01a5bb43cd2d 100644 --- a/drivers/staging/gpib/agilent_82350b/agilent_82350b.c +++ b/drivers/staging/gpib/agilent_82350b/agilent_82350b.c @@ -182,10 +182,12 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer, return retval; #endif - retval = agilent_82350b_write(board, buffer, 1, 0, 
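The rockchip-sfc hunk above replaces a bare virt_to_phys() with dma_map_single() plus a mapping-error check, which in turn forces a matching unmap on both the probe error path and remove. A userspace model of the goto-unwind ordering this relies on (map/unmap/register here are stand-ins for the DMA and SPI core APIs):

#include <stdio.h>

static int fake_map(void) { return 0; }		/* 0 = success */
static void fake_unmap(void) { puts("unmap"); }
static int fake_register(void) { return -1; }	/* force the error path */

/* Each setup step gets a label; a failure jumps past its own
 * cleanup and runs every earlier step's teardown in reverse. */
static int probe(void)
{
	int ret;

	ret = fake_map();
	if (ret)
		goto err_map;

	ret = fake_register();
	if (ret)
		goto err_register;

	return 0;

err_register:
	fake_unmap();	/* undo the mapping created above */
err_map:
	return ret;
}

int main(void)
{
	printf("probe: %d\n", probe());
	return 0;
}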
&num_bytes); - *bytes_written += num_bytes; - if (retval < 0) - return retval; + if (fifotransferlength > 0) { + retval = agilent_82350b_write(board, buffer, 1, 0, &num_bytes); + *bytes_written += num_bytes; + if (retval < 0) + return retval; + } write_byte(tms_priv, tms_priv->imr0_bits & ~HR_BOIE, IMR0); for (i = 1; i < fifotransferlength;) { @@ -217,7 +219,7 @@ static int agilent_82350b_accel_write(struct gpib_board *board, u8 *buffer, break; } write_byte(tms_priv, tms_priv->imr0_bits, IMR0); - if (retval) + if (retval < 0) return retval; if (send_eoi) { diff --git a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c index 164dcfc3c9ef..f7bfb4a8e553 100644 --- a/drivers/staging/gpib/fmh_gpib/fmh_gpib.c +++ b/drivers/staging/gpib/fmh_gpib/fmh_gpib.c @@ -1517,6 +1517,11 @@ void fmh_gpib_detach(struct gpib_board *board) resource_size(e_priv->gpib_iomem_res)); } fmh_gpib_generic_detach(board); + + if (board->dev) { + put_device(board->dev); + board->dev = NULL; + } } static int fmh_gpib_pci_attach_impl(struct gpib_board *board, diff --git a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c index 4dec87d12687..1f8412de9fa3 100644 --- a/drivers/staging/gpib/ni_usb/ni_usb_gpib.c +++ b/drivers/staging/gpib/ni_usb/ni_usb_gpib.c @@ -327,7 +327,10 @@ static void ni_usb_soft_update_status(struct gpib_board *board, unsigned int ni_ board->status &= ~clear_mask; board->status &= ~ni_usb_ibsta_mask; board->status |= ni_usb_ibsta & ni_usb_ibsta_mask; - // FIXME should generate events on DTAS and DCAS + if (ni_usb_ibsta & DCAS) + push_gpib_event(board, EVENT_DEV_CLR); + if (ni_usb_ibsta & DTAS) + push_gpib_event(board, EVENT_DEV_TRG); spin_lock_irqsave(&board->spinlock, flags); /* remove set status bits from monitored set why ?***/ @@ -694,8 +697,12 @@ static int ni_usb_read(struct gpib_board *board, u8 *buffer, size_t length, */ break; case NIUSB_ATN_STATE_ERROR: - retval = -EIO; - dev_err(&usb_dev->dev, "read when ATN set\n"); + if (status.ibsta & DCAS) { + retval = -EINTR; + } else { + retval = -EIO; + dev_dbg(&usb_dev->dev, "read when ATN set stat: 0x%06x\n", status.ibsta); + } break; case NIUSB_ADDRESSING_ERROR: retval = -EIO; diff --git a/drivers/tee/qcomtee/Kconfig b/drivers/tee/qcomtee/Kconfig index 927686abceb1..9f19dee08db4 100644 --- a/drivers/tee/qcomtee/Kconfig +++ b/drivers/tee/qcomtee/Kconfig @@ -2,6 +2,7 @@ # Qualcomm Trusted Execution Environment Configuration config QCOMTEE tristate "Qualcomm TEE Support" + depends on ARCH_QCOM || COMPILE_TEST depends on !CPU_BIG_ENDIAN select QCOM_SCM select QCOM_TZMEM_MODE_SHMBRIDGE diff --git a/drivers/tee/qcomtee/call.c b/drivers/tee/qcomtee/call.c index cc17a48d0ab7..ac134452cc9c 100644 --- a/drivers/tee/qcomtee/call.c +++ b/drivers/tee/qcomtee/call.c @@ -308,7 +308,7 @@ static int qcomtee_params_from_args(struct tee_param *params, } /* Release any IO and OO objects not processed. 
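In the agilent_82350b hunk above, the final status check becomes `if (retval < 0)` because the write helpers return a positive byte count on success; treating any nonzero value as an error would mis-report short-but-successful transfers. A small illustration of the convention (function name is made up):

#include <errno.h>
#include <stdio.h>

/* Kernel-style convention: negative errno on failure,
 * a byte count (possibly 0) on success. */
static int xfer(int bytes_available)
{
	if (bytes_available < 0)
		return -EIO;
	return bytes_available;	/* bytes actually transferred */
}

int main(void)
{
	int retval = xfer(5);

	if (retval)		/* WRONG: 5 bytes looks like an error */
		printf("buggy check flags %d as failure\n", retval);
	if (retval < 0)		/* correct: only negatives are errors */
		printf("never printed for a successful transfer\n");
	return 0;
}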
*/ - for (; u[i].type && i < num_params; i++) { + for (; i < num_params && u[i].type; i++) { if (u[i].type == QCOMTEE_ARG_TYPE_OO || u[i].type == QCOMTEE_ARG_TYPE_IO) qcomtee_object_put(u[i].o); diff --git a/drivers/tee/qcomtee/core.c b/drivers/tee/qcomtee/core.c index 783acc59cfa9..b6715ada7700 100644 --- a/drivers/tee/qcomtee/core.c +++ b/drivers/tee/qcomtee/core.c @@ -424,7 +424,7 @@ static int qcomtee_prepare_msg(struct qcomtee_object_invoke_ctx *oic, if (!(u[i].flags & QCOMTEE_ARG_FLAGS_UADDR)) memcpy(msgptr, u[i].b.addr, u[i].b.size); else if (copy_from_user(msgptr, u[i].b.uaddr, u[i].b.size)) - return -EINVAL; + return -EFAULT; offset += qcomtee_msg_offset_align(u[i].b.size); ib++; diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index a53ba04d9770..710ae4d40aec 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c @@ -635,7 +635,9 @@ static int dw8250_probe(struct platform_device *pdev) if (IS_ERR(data->rst)) return PTR_ERR(data->rst); - reset_control_deassert(data->rst); + err = reset_control_deassert(data->rst); + if (err) + return dev_err_probe(dev, err, "failed to deassert resets\n"); err = devm_add_action_or_reset(dev, dw8250_reset_control_assert, data->rst); if (err) diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index 04a0cbab02c2..b9cc0b786ca6 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -40,6 +40,8 @@ #define PCI_DEVICE_ID_ACCESSIO_COM_4SM 0x10db #define PCI_DEVICE_ID_ACCESSIO_COM_8SM 0x10ea +#define PCI_DEVICE_ID_ADVANTECH_XR17V352 0x0018 + #define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002 #define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004 #define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a @@ -1622,6 +1624,12 @@ static const struct exar8250_board pbn_fastcom35x_8 = { .exit = pci_xr17v35x_exit, }; +static const struct exar8250_board pbn_adv_XR17V352 = { + .num_ports = 2, + .setup = pci_xr17v35x_setup, + .exit = pci_xr17v35x_exit, +}; + static const struct exar8250_board pbn_exar_XR17V4358 = { .num_ports = 12, .setup = pci_xr17v35x_setup, @@ -1696,6 +1704,9 @@ static const struct pci_device_id exar_pci_tbl[] = { USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x), USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x), + /* ADVANTECH devices */ + EXAR_DEVICE(ADVANTECH, XR17V352, pbn_adv_XR17V352), + /* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */ EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x), EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x), diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index b44de2ed7413..5875a7b9b4b1 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -435,6 +435,7 @@ static int __maybe_unused mtk8250_runtime_suspend(struct device *dev) while (serial_in(up, MTK_UART_DEBUG0)); + clk_disable_unprepare(data->uart_clk); clk_disable_unprepare(data->bus_clk); return 0; @@ -445,6 +446,7 @@ static int __maybe_unused mtk8250_runtime_resume(struct device *dev) struct mtk8250_data *data = dev_get_drvdata(dev); clk_prepare_enable(data->bus_clk); + clk_prepare_enable(data->uart_clk); return 0; } @@ -475,13 +477,13 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p, int dmacnt; #endif - data->uart_clk = devm_clk_get(&pdev->dev, "baud"); + data->uart_clk = devm_clk_get_enabled(&pdev->dev, "baud"); if (IS_ERR(data->uart_clk)) { /* * For compatibility with older device trees try unnamed * clk when no baud clk can be found. 
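The qcomtee call.c one-liner above reorders the loop condition so the index is validated before u[i] is read; with the old order, a fully populated array let `u[i].type` dereference one element past the end before the bound was ever checked. A self-contained demonstration of the short-circuit pattern:

#include <stdio.h>

struct arg { int type; };

/* Correct: test the bound first so the subscript is only
 * evaluated for valid indices (&& short-circuits). */
static int count_tail(struct arg *u, int i, int num)
{
	int n = 0;

	for (; i < num && u[i].type; i++)
		n++;		/* release/process u[i] here */
	return n;
}

int main(void)
{
	struct arg u[3] = { { 1 }, { 1 }, { 1 } };

	/* With "u[i].type && i < num" this would read u[3]. */
	printf("processed %d trailing args\n", count_tail(u, 0, 3));
	return 0;
}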
*/ - data->uart_clk = devm_clk_get(&pdev->dev, NULL); + data->uart_clk = devm_clk_get_enabled(&pdev->dev, NULL); if (IS_ERR(data->uart_clk)) { dev_warn(&pdev->dev, "Can't get uart clock\n"); return PTR_ERR(data->uart_clk); diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 1a2c4c14f6aa..c7435595dce1 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -588,13 +588,6 @@ static int sc16is7xx_set_baud(struct uart_port *port, int baud) div /= prescaler; } - /* Enable enhanced features */ - sc16is7xx_efr_lock(port); - sc16is7xx_port_update(port, SC16IS7XX_EFR_REG, - SC16IS7XX_EFR_ENABLE_BIT, - SC16IS7XX_EFR_ENABLE_BIT); - sc16is7xx_efr_unlock(port); - /* If bit MCR_CLKSEL is set, the divide by 4 prescaler is activated. */ sc16is7xx_port_update(port, SC16IS7XX_MCR_REG, SC16IS7XX_MCR_CLKSEL_BIT, diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 538b2f991609..62bb62b82cbe 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1014,16 +1014,18 @@ static int sci_handle_fifo_overrun(struct uart_port *port) struct sci_port *s = to_sci_port(port); const struct plat_sci_reg *reg; int copied = 0; - u16 status; + u32 status; - reg = sci_getreg(port, s->params->overrun_reg); - if (!reg->size) - return 0; + if (s->type != SCI_PORT_RSCI) { + reg = sci_getreg(port, s->params->overrun_reg); + if (!reg->size) + return 0; + } - status = sci_serial_in(port, s->params->overrun_reg); + status = s->ops->read_reg(port, s->params->overrun_reg); if (status & s->params->overrun_mask) { status &= ~s->params->overrun_mask; - sci_serial_out(port, s->params->overrun_reg, status); + s->ops->write_reg(port, s->params->overrun_reg, status); port->icount.overrun++; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index f5bc53875330..47f589c4104a 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -467,6 +467,8 @@ static const struct usb_device_id usb_quirk_list[] = { /* Huawei 4G LTE module */ { USB_DEVICE(0x12d1, 0x15bb), .driver_info = USB_QUIRK_DISCONNECT_SUSPEND }, + { USB_DEVICE(0x12d1, 0x15c1), .driver_info = + USB_QUIRK_DISCONNECT_SUSPEND }, { USB_DEVICE(0x12d1, 0x15c3), .driver_info = USB_QUIRK_DISCONNECT_SUSPEND }, diff --git a/drivers/usb/dwc3/dwc3-generic-plat.c b/drivers/usb/dwc3/dwc3-generic-plat.c index d96b20570002..f8ad79c08c4e 100644 --- a/drivers/usb/dwc3/dwc3-generic-plat.c +++ b/drivers/usb/dwc3/dwc3-generic-plat.c @@ -85,11 +85,8 @@ static int dwc3_generic_probe(struct platform_device *pdev) static void dwc3_generic_remove(struct platform_device *pdev) { struct dwc3 *dwc = platform_get_drvdata(pdev); - struct dwc3_generic *dwc3g = to_dwc3_generic(dwc); dwc3_core_remove(dwc); - - clk_bulk_disable_unprepare(dwc3g->num_clocks, dwc3g->clks); } static int dwc3_generic_suspend(struct device *dev) diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 20165e1582d9..b71680c58de6 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -667,8 +667,6 @@ static void *raw_alloc_io_data(struct usb_raw_ep_io *io, void __user *ptr, return ERR_PTR(-EINVAL); if (!usb_raw_io_flags_valid(io->flags)) return ERR_PTR(-EINVAL); - if (io->length > PAGE_SIZE) - return ERR_PTR(-EINVAL); if (get_from_user) data = memdup_user(ptr + sizeof(*io), io->length); else { diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index 63edf2d8f245..ecda964e018a 100644 --- 
a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -892,7 +892,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) dev_info(dbc->dev, "DbC configured\n"); portsc = readl(&dbc->regs->portsc); writel(portsc, &dbc->regs->portsc); - return EVT_GSER; + ret = EVT_GSER; + break; } return EVT_DONE; @@ -954,7 +955,8 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) break; case TRB_TYPE(TRB_TRANSFER): dbc_handle_xfer_event(dbc, evt); - ret = EVT_XFER_DONE; + if (ret != EVT_GSER) + ret = EVT_XFER_DONE; break; default: break; @@ -1390,8 +1392,15 @@ int xhci_dbc_suspend(struct xhci_hcd *xhci) if (!dbc) return 0; - if (dbc->state == DS_CONFIGURED) + switch (dbc->state) { + case DS_ENABLED: + case DS_CONNECTED: + case DS_CONFIGURED: dbc->resume_required = 1; + break; + default: + break; + } xhci_dbc_stop(dbc); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 5c8ab519f497..f67a4d956204 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -582,6 +582,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd) if (!usb_hcd_is_primary_hcd(hcd)) return 0; + xhci->allow_single_roothub = 1; + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) xhci_pme_acpi_rtd3_enable(pdev); @@ -637,7 +639,6 @@ int xhci_pci_common_probe(struct pci_dev *dev, const struct pci_device_id *id) xhci = hcd_to_xhci(hcd); xhci->reset = reset; - xhci->allow_single_roothub = 1; if (!xhci_has_one_roothub(xhci)) { xhci->shared_hcd = usb_create_shared_hcd(&xhci_pci_hc_driver, &dev->dev, pci_name(dev), hcd); diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index 09ac6f1c985f..0b56b773dbdf 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -182,6 +182,7 @@ config USB_LJCA config USB_USBIO tristate "Intel USBIO Bridge support" depends on USB && ACPI + depends on X86 || COMPILE_TEST select AUXILIARY_BUS help This adds support for Intel USBIO drivers. 
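The xhci-dbgcap suspend hunk above widens "needs resume" from just DS_CONFIGURED to every state where DbC is active; the grouped case labels fall through to one flag assignment. A sketch with a stand-in enum (state values are illustrative):

#include <stdio.h>

enum dbc_state { DS_DISABLED, DS_ENABLED, DS_CONNECTED, DS_CONFIGURED };

/* DbC must be restarted after suspend whenever it was enabled,
 * not only when a host had fully configured it. */
static int resume_required(enum dbc_state state)
{
	switch (state) {
	case DS_ENABLED:
	case DS_CONNECTED:
	case DS_CONFIGURED:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	for (enum dbc_state s = DS_DISABLED; s <= DS_CONFIGURED; s++)
		printf("state %d -> resume_required %d\n", s,
		       resume_required(s));
	return 0;
}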
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 62e984d20e59..5de856f65f0d 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EM05CN 0x0312 #define QUECTEL_PRODUCT_EM05G_GR 0x0313 #define QUECTEL_PRODUCT_EM05G_RS 0x0314 +#define QUECTEL_PRODUCT_RG255C 0x0316 #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 #define QUECTEL_PRODUCT_RM520N 0x0801 @@ -617,6 +618,7 @@ static void option_instat_callback(struct urb *urb); #define UNISOC_VENDOR_ID 0x1782 /* TOZED LT70-C based on UNISOC SL8563 uses UNISOC's vendor ID */ #define TOZED_PRODUCT_LT70C 0x4055 +#define UNISOC_PRODUCT_UIS7720 0x4064 /* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */ #define LUAT_PRODUCT_AIR720U 0x4e00 @@ -1270,6 +1272,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG650V, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RG255C, 0xff, 0xff, 0x40) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, @@ -1398,10 +1403,14 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(0) | NCTRL(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a2, 0xff), /* Telit FN920C04 (MBIM) */ .driver_info = NCTRL(4) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a3, 0xff), /* Telit FN920C04 (ECM) */ + .driver_info = NCTRL(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff), /* Telit FN20C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(3) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a7, 0xff), /* Telit FN920C04 (MBIM) */ .driver_info = NCTRL(4) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a8, 0xff), /* Telit FN920C04 (ECM) */ + .driver_info = NCTRL(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff), /* Telit FN20C04 (rmnet) */ .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10aa, 0xff), /* Telit FN920C04 (MBIM) */ @@ -2466,6 +2475,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9291, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, UNISOC_PRODUCT_UIS7720, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) }, { USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */ .driver_info = NCTRL(1) }, diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index b2a568a5bc9b..cc78770509db 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -7876,9 +7876,9 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) port->partner_desc.identity = &port->partner_ident; - 
port->role_sw = usb_role_switch_get(port->dev); + port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode); if (!port->role_sw) - port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode); + port->role_sw = usb_role_switch_get(port->dev); if (IS_ERR(port->role_sw)) { err = PTR_ERR(port->role_sw); goto out_destroy_wq; diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index e466cf52d7d7..7f7e6bb23a90 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -988,10 +988,11 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, static void nfsd4_read_release(union nfsd4_op_u *u) { - if (u->read.rd_nf) + if (u->read.rd_nf) { + trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp, + u->read.rd_offset, u->read.rd_length); nfsd_file_put(u->read.rd_nf); - trace_nfsd_read_done(u->read.rd_rqstp, u->read.rd_fhp, - u->read.rd_offset, u->read.rd_length); + } } static __be32 @@ -2892,10 +2893,20 @@ nfsd4_proc_compound(struct svc_rqst *rqstp) rqstp->rq_lease_breaker = (void **)&cstate->clp; - trace_nfsd_compound(rqstp, args->tag, args->taglen, args->opcnt); + trace_nfsd_compound(rqstp, args->tag, args->taglen, args->client_opcnt); while (!status && resp->opcnt < args->opcnt) { op = &args->ops[resp->opcnt++]; + if (unlikely(resp->opcnt == NFSD_MAX_OPS_PER_COMPOUND)) { + /* If there are still more operations to process, + * stop here and report NFS4ERR_RESOURCE. */ + if (cstate->minorversion == 0 && + args->client_opcnt > resp->opcnt) { + op->status = nfserr_resource; + goto encode_op; + } + } + /* * The XDR decode routines may have pre-set op->status; * for example, if there is a miscellaneous XDR error @@ -2972,7 +2983,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp) status = op->status; } - trace_nfsd_compound_status(args->opcnt, resp->opcnt, + trace_nfsd_compound_status(args->client_opcnt, resp->opcnt, status, nfsd4_op_name(op->opnum)); nfsd4_cstate_clear_replay(cstate); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 81fa7cc6c77b..c1b54322c412 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3902,6 +3902,7 @@ static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfs ca->headerpadsz = 0; ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); + ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); ca->maxresp_cached = min_t(u32, ca->maxresp_cached, NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index c0a3c6a7c8bb..6040a6145dad 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -2488,8 +2488,10 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp) if (xdr_stream_decode_u32(argp->xdr, &argp->minorversion) < 0) return false; - if (xdr_stream_decode_u32(argp->xdr, &argp->opcnt) < 0) + if (xdr_stream_decode_u32(argp->xdr, &argp->client_opcnt) < 0) return false; + argp->opcnt = min_t(u32, argp->client_opcnt, + NFSD_MAX_OPS_PER_COMPOUND); if (argp->opcnt > ARRAY_SIZE(argp->iops)) { argp->ops = vcalloc(argp->opcnt, sizeof(*argp->ops)); @@ -2628,10 +2630,8 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char sep, __be32 *p; __be32 pathlen; int pathlen_offset; - int strlen, count=0; char *str, *end, *next; - - dprintk("nfsd4_encode_components(%s)\n", components); + int count = 0; pathlen_offset = xdr->buf->len; p = xdr_reserve_space(xdr, 4); @@ -2658,9 +2658,8 @@ static __be32 nfsd4_encode_components_esc(struct xdr_stream *xdr, char 
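The nfs4proc/nfs4xdr hunks above split the wire operation count (client_opcnt) from the number the server will actually decode (opcnt, clamped to NFSD_MAX_OPS_PER_COMPOUND); when an NFSv4.0 client sends more, processing stops at the cap and the last op reports NFS4ERR_RESOURCE. A compact, approximate model of that control flow:

#include <stdio.h>

#define NFSD_MAX_OPS_PER_COMPOUND 200

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Returns the number of ops processed; *resource is set when the
 * v4.0 client asked for more than the server will decode. */
static unsigned int run_compound(unsigned int client_opcnt,
				 int minorversion, int *resource)
{
	unsigned int opcnt = min_u32(client_opcnt, NFSD_MAX_OPS_PER_COMPOUND);
	unsigned int done = 0;

	*resource = 0;
	while (done < opcnt) {
		done++;
		if (done == NFSD_MAX_OPS_PER_COMPOUND &&
		    minorversion == 0 && client_opcnt > done) {
			*resource = 1;	/* NFS4ERR_RESOURCE on this op */
			break;
		}
	}
	return done;
}

int main(void)
{
	int res;
	unsigned int n = run_compound(250, 0, &res);

	printf("processed %u ops, resource error: %d\n", n, res);
	return 0;
}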
sep, for (; *end && (*end != sep); end++) /* find sep or end of string */; - strlen = end - str; - if (strlen) { - if (xdr_stream_encode_opaque(xdr, str, strlen) < 0) + if (end > str) { + if (xdr_stream_encode_opaque(xdr, str, end - str) < 0) return nfserr_resource; count++; } else @@ -2939,6 +2938,12 @@ struct nfsd4_fattr_args { typedef __be32(*nfsd4_enc_attr)(struct xdr_stream *xdr, const struct nfsd4_fattr_args *args); +static __be32 nfsd4_encode_fattr4__inval(struct xdr_stream *xdr, + const struct nfsd4_fattr_args *args) +{ + return nfserr_inval; +} + static __be32 nfsd4_encode_fattr4__noop(struct xdr_stream *xdr, const struct nfsd4_fattr_args *args) { @@ -3560,6 +3565,8 @@ static const nfsd4_enc_attr nfsd4_enc_fattr4_encode_ops[] = { [FATTR4_MODE_UMASK] = nfsd4_encode_fattr4__noop, [FATTR4_XATTR_SUPPORT] = nfsd4_encode_fattr4_xattr_support, + [FATTR4_TIME_DELEG_ACCESS] = nfsd4_encode_fattr4__inval, + [FATTR4_TIME_DELEG_MODIFY] = nfsd4_encode_fattr4__inval, [FATTR4_OPEN_ARGUMENTS] = nfsd4_encode_fattr4_open_arguments, }; diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index ea87b42894dd..f19320018639 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -57,6 +57,9 @@ struct readdir_cd { __be32 err; /* 0, nfserr, or nfserr_eof */ }; +/* Maximum number of operations per session compound */ +#define NFSD_MAX_OPS_PER_COMPOUND 200 + struct nfsd_genl_rqstp { struct sockaddr rq_daddr; struct sockaddr rq_saddr; diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index d4b48602b2b0..ee0570cbdd9e 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -903,6 +903,7 @@ struct nfsd4_compoundargs { char * tag; u32 taglen; u32 minorversion; + u32 client_opcnt; u32 opcnt; bool splice_ok; struct nfsd4_op *ops; diff --git a/fs/resctrl/monitor.c b/fs/resctrl/monitor.c index 4076336fbba6..572a9925bd6c 100644 --- a/fs/resctrl/monitor.c +++ b/fs/resctrl/monitor.c @@ -1782,15 +1782,13 @@ int resctrl_mon_resource_init(void) mba_mbps_default_event = QOS_L3_MBM_TOTAL_EVENT_ID; if (r->mon.mbm_cntr_assignable) { - if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID)) - resctrl_enable_mon_event(QOS_L3_MBM_TOTAL_EVENT_ID); - if (!resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID)) - resctrl_enable_mon_event(QOS_L3_MBM_LOCAL_EVENT_ID); - mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask; - mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask & - (READS_TO_LOCAL_MEM | - READS_TO_LOCAL_S_MEM | - NON_TEMP_WRITE_TO_LOCAL_MEM); + if (resctrl_is_mon_event_enabled(QOS_L3_MBM_TOTAL_EVENT_ID)) + mon_event_all[QOS_L3_MBM_TOTAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask; + if (resctrl_is_mon_event_enabled(QOS_L3_MBM_LOCAL_EVENT_ID)) + mon_event_all[QOS_L3_MBM_LOCAL_EVENT_ID].evt_cfg = r->mon.mbm_cfg_mask & + (READS_TO_LOCAL_MEM | + READS_TO_LOCAL_S_MEM | + NON_TEMP_WRITE_TO_LOCAL_MEM); r->mon.mbm_assign_on_mkdir = true; resctrl_file_fflags_init("num_mbm_cntrs", RFTYPE_MON_INFO | RFTYPE_RES_CACHE); diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index 16a00a61fd2c..203e2aaa3c25 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -534,8 +534,6 @@ struct smb_version_operations { void (*new_lease_key)(struct cifs_fid *); int (*generate_signingkey)(struct cifs_ses *ses, struct TCP_Server_Info *server); - int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *, - bool allocate_crypto); int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon, struct cifsFileInfo *src_file); int (*enum_snapshots)(const unsigned int xid, struct cifs_tcon *tcon, @@ 
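The nfs4xdr hunk above registers explicit encoders that return NFS4ERR_INVAL for the delegation timestamp attributes, so a client requesting them in GETATTR gets a protocol error rather than a table miss. A sketch of the dispatch-table idiom, with made-up attribute numbers and a stand-in status code:

#include <stdio.h>

#define NFSERR_INVAL	22
#define FATTR_MAX	4

typedef int (*enc_attr)(void);

static int enc__noop(void)  { return 0; }		/* encodes nothing */
static int enc__inval(void) { return NFSERR_INVAL; }	/* always rejects */
static int enc_size(void)   { return 0; }		/* a "real" encoder */

/* Attribute number indexes straight into the handler table. */
static const enc_attr encoders[FATTR_MAX] = {
	[0] = enc_size,
	[1] = enc__noop,
	[2] = enc__inval,	/* e.g. TIME_DELEG_ACCESS */
	[3] = enc__inval,	/* e.g. TIME_DELEG_MODIFY */
};

int main(void)
{
	for (int attr = 0; attr < FATTR_MAX; attr++)
		printf("attr %d -> status %d\n", attr, encoders[attr]());
	return 0;
}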
-732,7 +730,7 @@ struct TCP_Server_Info { bool nosharesock; bool tcp_nodelay; bool terminate; - unsigned int credits; /* send no more requests at once */ + int credits; /* send no more requests at once */ unsigned int max_credits; /* can override large 32000 default at mnt */ unsigned int in_flight; /* number of requests on the wire to server */ unsigned int max_in_flight; /* max number of requests that were on wire */ diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h index 4976be2c47c1..fb1813cbe0eb 100644 --- a/fs/smb/client/cifsproto.h +++ b/fs/smb/client/cifsproto.h @@ -9,6 +9,7 @@ #define _CIFSPROTO_H #include #include +#include "cifsglob.h" #include "trace.h" #ifdef CONFIG_CIFS_DFS_UPCALL #include "dfs_cache.h" diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c index 2881efcbe09a..7da194f29fef 100644 --- a/fs/smb/client/cifssmb.c +++ b/fs/smb/client/cifssmb.c @@ -1311,6 +1311,8 @@ cifs_readv_callback(struct mid_q_entry *mid) .rreq_debug_id = rdata->rreq->debug_id, .rreq_debug_index = rdata->subreq.debug_index, }; + unsigned int rreq_debug_id = rdata->rreq->debug_id; + unsigned int subreq_debug_index = rdata->subreq.debug_index; cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%zu\n", __func__, mid->mid, mid->mid_state, rdata->result, @@ -1374,6 +1376,9 @@ cifs_readv_callback(struct mid_q_entry *mid) __set_bit(NETFS_SREQ_MADE_PROGRESS, &rdata->subreq.flags); } + trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, rdata->credits.value, + server->credits, server->in_flight, + 0, cifs_trace_rw_credits_read_response_clear); rdata->credits.value = 0; rdata->subreq.error = rdata->result; rdata->subreq.transferred += rdata->got_bytes; @@ -1381,6 +1386,9 @@ cifs_readv_callback(struct mid_q_entry *mid) netfs_read_subreq_terminated(&rdata->subreq); release_mid(mid); add_credits(server, &credits, 0); + trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0, + server->credits, server->in_flight, + credits.value, cifs_trace_rw_credits_read_response_add); } /* cifs_async_readv - send an async write, and set up mid to handle result */ diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c index 098a79b7a959..cac355364e43 100644 --- a/fs/smb/client/inode.c +++ b/fs/smb/client/inode.c @@ -2484,11 +2484,8 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, } #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ do_rename_exit: - if (rc == 0) { + if (rc == 0) d_move(from_dentry, to_dentry); - /* Force a new lookup */ - d_drop(from_dentry); - } cifs_put_tlink(tlink); return rc; } diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c index 95cd484cfbba..0f9130ef2e7d 100644 --- a/fs/smb/client/smb2ops.c +++ b/fs/smb/client/smb2ops.c @@ -5446,7 +5446,6 @@ struct smb_version_operations smb20_operations = { .get_lease_key = smb2_get_lease_key, .set_lease_key = smb2_set_lease_key, .new_lease_key = smb2_new_lease_key, - .calc_signature = smb2_calc_signature, .is_read_op = smb2_is_read_op, .set_oplock_level = smb2_set_oplock_level, .create_lease_buf = smb2_create_lease_buf, @@ -5550,7 +5549,6 @@ struct smb_version_operations smb21_operations = { .get_lease_key = smb2_get_lease_key, .set_lease_key = smb2_set_lease_key, .new_lease_key = smb2_new_lease_key, - .calc_signature = smb2_calc_signature, .is_read_op = smb21_is_read_op, .set_oplock_level = smb21_set_oplock_level, .create_lease_buf = smb2_create_lease_buf, @@ -5660,7 +5658,6 @@ struct smb_version_operations smb30_operations = { .set_lease_key = smb2_set_lease_key, .new_lease_key = 
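The cifssmb.c hunk above copies the request's debug id and subrequest index into locals up front because netfs_read_subreq_terminated() can free rdata; the tracepoints that fire afterwards must not touch the dead object. A minimal reproduction of the save-before-release pattern (types and names simplified):

#include <stdio.h>
#include <stdlib.h>

struct rdata { unsigned int debug_id; };

static void terminate(struct rdata *r)
{
	free(r);	/* after this, r must not be dereferenced */
}

static void complete(struct rdata *r)
{
	/* Snapshot everything the trace needs while r is alive. */
	unsigned int debug_id = r->debug_id;

	terminate(r);
	/* Trace from the saved copy, never from r. */
	printf("trace: request %u done\n", debug_id);
}

int main(void)
{
	struct rdata *r = malloc(sizeof(*r));

	r->debug_id = 42;
	complete(r);
	return 0;
}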
smb2_new_lease_key, .generate_signingkey = generate_smb30signingkey, - .calc_signature = smb3_calc_signature, .set_integrity = smb3_set_integrity, .is_read_op = smb21_is_read_op, .set_oplock_level = smb3_set_oplock_level, @@ -5777,7 +5774,6 @@ struct smb_version_operations smb311_operations = { .set_lease_key = smb2_set_lease_key, .new_lease_key = smb2_new_lease_key, .generate_signingkey = generate_smb311signingkey, - .calc_signature = smb3_calc_signature, .set_integrity = smb3_set_integrity, .is_read_op = smb21_is_read_op, .set_oplock_level = smb3_set_oplock_level, diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h index 6eb86d134abc..5241daaae543 100644 --- a/fs/smb/client/smb2proto.h +++ b/fs/smb/client/smb2proto.h @@ -39,12 +39,6 @@ extern struct mid_q_entry *smb2_setup_async_request( struct TCP_Server_Info *server, struct smb_rqst *rqst); extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid); -extern int smb2_calc_signature(struct smb_rqst *rqst, - struct TCP_Server_Info *server, - bool allocate_crypto); -extern int smb3_calc_signature(struct smb_rqst *rqst, - struct TCP_Server_Info *server, - bool allocate_crypto); extern void smb2_echo_request(struct work_struct *work); extern __le32 smb2_get_lease_state(struct cifsInodeInfo *cinode); extern bool smb2_is_valid_oplock_break(char *buffer, diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c index ad6068e17a2a..6a9b80385b86 100644 --- a/fs/smb/client/smb2transport.c +++ b/fs/smb/client/smb2transport.c @@ -209,9 +209,9 @@ smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid) return tcon; } -int +static int smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, - bool allocate_crypto) + bool allocate_crypto) { int rc; unsigned char smb2_signature[SMB2_HMACSHA256_SIZE]; @@ -465,9 +465,9 @@ generate_smb311signingkey(struct cifs_ses *ses, return generate_smb3signingkey(ses, server, &triplet); } -int +static int smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, - bool allocate_crypto) + bool allocate_crypto) { int rc; unsigned char smb3_signature[SMB2_CMACAES_SIZE]; @@ -477,6 +477,9 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, struct smb_rqst drqst; u8 key[SMB3_SIGN_KEY_SIZE]; + if (server->vals->protocol_id <= SMB21_PROT_ID) + return smb2_calc_signature(rqst, server, allocate_crypto); + rc = smb3_get_sign_key(le64_to_cpu(shdr->SessionId), server, key); if (unlikely(rc)) { cifs_server_dbg(FYI, "%s: Could not get signing key\n", __func__); @@ -547,7 +550,6 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server, static int smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server) { - int rc = 0; struct smb2_hdr *shdr; struct smb2_sess_setup_req *ssr; bool is_binding; @@ -574,9 +576,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server) return 0; } - rc = server->ops->calc_signature(rqst, server, false); - - return rc; + return smb3_calc_signature(rqst, server, false); } int @@ -612,7 +612,7 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) memset(shdr->Signature, 0, SMB2_SIGNATURE_SIZE); - rc = server->ops->calc_signature(rqst, server, true); + rc = smb3_calc_signature(rqst, server, true); if (rc) return rc; diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c index 49e2df3ad1f0..85a4c55b61b8 100644 --- a/fs/smb/client/smbdirect.c +++ b/fs/smb/client/smbdirect.c @@ 
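With the per-dialect .calc_signature ops removed above, smb3_calc_signature() becomes the single entry point and routes SMB 2.0/2.1 to the HMAC-SHA256 path itself. A sketch of replacing an ops-table hook with a runtime version check; the dialect constants and signing stubs are stand-ins:

#include <stdio.h>

#define SMB21_PROT_ID	0x0210
#define SMB30_PROT_ID	0x0300

static int smb2_sign(void) { puts("HMAC-SHA256"); return 0; }
static int smb3_sign(void) { puts("AES-CMAC");    return 0; }

/* One signing entry point: old dialects branch to the SMB2
 * algorithm instead of carrying their own function pointer. */
static int calc_signature(unsigned int protocol_id)
{
	if (protocol_id <= SMB21_PROT_ID)
		return smb2_sign();
	return smb3_sign();
}

int main(void)
{
	calc_signature(SMB21_PROT_ID);
	calc_signature(SMB30_PROT_ID);
	return 0;
}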
-172,6 +172,7 @@ static void smbd_disconnect_wake_up_all(struct smbdirect_socket *sc) * in order to notice the broken connection. */ wake_up_all(&sc->status_wait); + wake_up_all(&sc->send_io.lcredits.wait_queue); wake_up_all(&sc->send_io.credits.wait_queue); wake_up_all(&sc->send_io.pending.dec_wait_queue); wake_up_all(&sc->send_io.pending.zero_wait_queue); @@ -495,6 +496,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc) struct smbdirect_send_io *request = container_of(wc->wr_cqe, struct smbdirect_send_io, cqe); struct smbdirect_socket *sc = request->socket; + int lcredits = 0; log_rdma_send(INFO, "smbdirect_send_io 0x%p completed wc->status=%s\n", request, ib_wc_status_msg(wc->status)); @@ -504,22 +506,24 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc) request->sge[i].addr, request->sge[i].length, DMA_TO_DEVICE); + mempool_free(request, sc->send_io.mem.pool); + lcredits += 1; if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { if (wc->status != IB_WC_WR_FLUSH_ERR) log_rdma_send(ERR, "wc->status=%s wc->opcode=%d\n", ib_wc_status_msg(wc->status), wc->opcode); - mempool_free(request, sc->send_io.mem.pool); smbd_disconnect_rdma_connection(sc); return; } + atomic_add(lcredits, &sc->send_io.lcredits.count); + wake_up(&sc->send_io.lcredits.wait_queue); + if (atomic_dec_and_test(&sc->send_io.pending.count)) wake_up(&sc->send_io.pending.zero_wait_queue); wake_up(&sc->send_io.pending.dec_wait_queue); - - mempool_free(request, sc->send_io.mem.pool); } static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp) @@ -567,6 +571,7 @@ static bool process_negotiation_response( log_rdma_event(ERR, "error: credits_granted==0\n"); return false; } + atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target); atomic_set(&sc->send_io.credits.count, le16_to_cpu(packet->credits_granted)); if (le32_to_cpu(packet->preferred_send_size) > sp->max_recv_size) { @@ -1114,6 +1119,24 @@ static int smbd_post_send_iter(struct smbdirect_socket *sc, struct smbdirect_data_transfer *packet; int new_credits = 0; +wait_lcredit: + /* Wait for local send credits */ + rc = wait_event_interruptible(sc->send_io.lcredits.wait_queue, + atomic_read(&sc->send_io.lcredits.count) > 0 || + sc->status != SMBDIRECT_SOCKET_CONNECTED); + if (rc) + goto err_wait_lcredit; + + if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { + log_outgoing(ERR, "disconnected not sending on wait_credit\n"); + rc = -EAGAIN; + goto err_wait_lcredit; + } + if (unlikely(atomic_dec_return(&sc->send_io.lcredits.count) < 0)) { + atomic_inc(&sc->send_io.lcredits.count); + goto wait_lcredit; + } + wait_credit: /* Wait for send credits. 
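The client-side smbdirect hunks above add a second counter, lcredits, tracking local send-queue slots alongside the peer-granted credits; a sender must win one of each, and the decrement is revalidated so racing waiters back off. A loose single-file analogy of the acquire/release discipline using C11 atomics (the kernel sleeps on a wait queue where this sketch spins):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int lcredits;	/* local send WQE slots */

/* Take one credit; on a lost race, refund and retry. In the
 * driver the retry blocks on a wait queue instead of spinning. */
static void get_lcredit(void)
{
	for (;;) {
		if (atomic_load(&lcredits) <= 0)
			continue;	/* would sleep in the kernel */
		if (atomic_fetch_sub(&lcredits, 1) - 1 >= 0)
			return;		/* new value still >= 0: ours */
		atomic_fetch_add(&lcredits, 1);	/* raced below zero */
	}
}

/* Send completion returns the slot and wakes waiters. */
static void put_lcredit(void)
{
	atomic_fetch_add(&lcredits, 1);
}

int main(void)
{
	atomic_store(&lcredits, 2);
	get_lcredit();
	get_lcredit();
	put_lcredit();
	printf("lcredits now %d\n", atomic_load(&lcredits));
	return 0;
}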
A SMBD packet needs one credit */ rc = wait_event_interruptible(sc->send_io.credits.wait_queue, @@ -1132,23 +1155,6 @@ static int smbd_post_send_iter(struct smbdirect_socket *sc, goto wait_credit; } -wait_send_queue: - wait_event(sc->send_io.pending.dec_wait_queue, - atomic_read(&sc->send_io.pending.count) < sp->send_credit_target || - sc->status != SMBDIRECT_SOCKET_CONNECTED); - - if (sc->status != SMBDIRECT_SOCKET_CONNECTED) { - log_outgoing(ERR, "disconnected not sending on wait_send_queue\n"); - rc = -EAGAIN; - goto err_wait_send_queue; - } - - if (unlikely(atomic_inc_return(&sc->send_io.pending.count) > - sp->send_credit_target)) { - atomic_dec(&sc->send_io.pending.count); - goto wait_send_queue; - } - request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL); if (!request) { rc = -ENOMEM; @@ -1229,10 +1235,21 @@ static int smbd_post_send_iter(struct smbdirect_socket *sc, le32_to_cpu(packet->data_length), le32_to_cpu(packet->remaining_data_length)); + /* + * Now that we got a local and a remote credit + * we add us as pending + */ + atomic_inc(&sc->send_io.pending.count); + rc = smbd_post_send(sc, request); if (!rc) return 0; + if (atomic_dec_and_test(&sc->send_io.pending.count)) + wake_up(&sc->send_io.pending.zero_wait_queue); + + wake_up(&sc->send_io.pending.dec_wait_queue); + err_dma: for (i = 0; i < request->num_sge; i++) if (request->sge[i].addr) @@ -1246,14 +1263,14 @@ static int smbd_post_send_iter(struct smbdirect_socket *sc, atomic_sub(new_credits, &sc->recv_io.credits.count); err_alloc: - if (atomic_dec_and_test(&sc->send_io.pending.count)) - wake_up(&sc->send_io.pending.zero_wait_queue); - -err_wait_send_queue: - /* roll back send credits and pending */ atomic_inc(&sc->send_io.credits.count); + wake_up(&sc->send_io.credits.wait_queue); err_wait_credit: + atomic_inc(&sc->send_io.lcredits.count); + wake_up(&sc->send_io.lcredits.wait_queue); + +err_wait_lcredit: return rc; } @@ -1767,6 +1784,7 @@ static struct smbd_connection *_smbd_get_connection( struct smbdirect_socket *sc; struct smbdirect_socket_parameters *sp; struct rdma_conn_param conn_param; + struct ib_qp_cap qp_cap; struct ib_qp_init_attr qp_attr; struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr; struct ib_port_immutable port_immutable; @@ -1838,6 +1856,25 @@ static struct smbd_connection *_smbd_get_connection( goto config_failed; } + sp->responder_resources = + min_t(u8, sp->responder_resources, + sc->ib.dev->attrs.max_qp_rd_atom); + log_rdma_mr(INFO, "responder_resources=%d\n", + sp->responder_resources); + + /* + * We use allocate sp->responder_resources * 2 MRs + * and each MR needs WRs for REG and INV, so + * we use '* 4'. 
+ * + * +1 for ib_drain_qp() + */ + memset(&qp_cap, 0, sizeof(qp_cap)); + qp_cap.max_send_wr = sp->send_credit_target + sp->responder_resources * 4 + 1; + qp_cap.max_recv_wr = sp->recv_credit_max + 1; + qp_cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE; + qp_cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE; + sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0); if (IS_ERR(sc->ib.pd)) { rc = PTR_ERR(sc->ib.pd); @@ -1848,7 +1885,7 @@ static struct smbd_connection *_smbd_get_connection( sc->ib.send_cq = ib_alloc_cq_any(sc->ib.dev, sc, - sp->send_credit_target, IB_POLL_SOFTIRQ); + qp_cap.max_send_wr, IB_POLL_SOFTIRQ); if (IS_ERR(sc->ib.send_cq)) { sc->ib.send_cq = NULL; goto alloc_cq_failed; @@ -1856,7 +1893,7 @@ static struct smbd_connection *_smbd_get_connection( sc->ib.recv_cq = ib_alloc_cq_any(sc->ib.dev, sc, - sp->recv_credit_max, IB_POLL_SOFTIRQ); + qp_cap.max_recv_wr, IB_POLL_SOFTIRQ); if (IS_ERR(sc->ib.recv_cq)) { sc->ib.recv_cq = NULL; goto alloc_cq_failed; @@ -1865,11 +1902,7 @@ static struct smbd_connection *_smbd_get_connection( memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.event_handler = smbd_qp_async_error_upcall; qp_attr.qp_context = sc; - qp_attr.cap.max_send_wr = sp->send_credit_target; - qp_attr.cap.max_recv_wr = sp->recv_credit_max; - qp_attr.cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE; - qp_attr.cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE; - qp_attr.cap.max_inline_data = 0; + qp_attr.cap = qp_cap; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.qp_type = IB_QPT_RC; qp_attr.send_cq = sc->ib.send_cq; @@ -1883,12 +1916,6 @@ static struct smbd_connection *_smbd_get_connection( } sc->ib.qp = sc->rdma.cm_id->qp; - sp->responder_resources = - min_t(u8, sp->responder_resources, - sc->ib.dev->attrs.max_qp_rd_atom); - log_rdma_mr(INFO, "responder_resources=%d\n", - sp->responder_resources); - memset(&conn_param, 0, sizeof(conn_param)); conn_param.initiator_depth = sp->initiator_depth; conn_param.responder_resources = sp->responder_resources; diff --git a/fs/smb/client/trace.c b/fs/smb/client/trace.c index 465483787193..16b0e719731f 100644 --- a/fs/smb/client/trace.c +++ b/fs/smb/client/trace.c @@ -4,5 +4,6 @@ * * Author(s): Steve French */ +#include "cifsglob.h" #define CREATE_TRACE_POINTS #include "trace.h" diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h index 361db7f9f623..ee5a90d691c8 100644 --- a/fs/smb/common/smbdirect/smbdirect_socket.h +++ b/fs/smb/common/smbdirect/smbdirect_socket.h @@ -142,7 +142,15 @@ struct smbdirect_socket { } mem; /* - * The credit state for the send side + * The local credit state for ib_post_send() + */ + struct { + atomic_t count; + wait_queue_head_t wait_queue; + } lcredits; + + /* + * The remote credit state for the send side */ struct { atomic_t count; @@ -337,6 +345,9 @@ static __always_inline void smbdirect_socket_init(struct smbdirect_socket *sc) INIT_DELAYED_WORK(&sc->idle.timer_work, __smbdirect_socket_disabled_work); disable_delayed_work_sync(&sc->idle.timer_work); + atomic_set(&sc->send_io.lcredits.count, 0); + init_waitqueue_head(&sc->send_io.lcredits.wait_queue); + atomic_set(&sc->send_io.credits.count, 0); init_waitqueue_head(&sc->send_io.credits.wait_queue); diff --git a/fs/smb/server/transport_ipc.c b/fs/smb/server/transport_ipc.c index 46f87fd1ce1c..2c08cccfa680 100644 --- a/fs/smb/server/transport_ipc.c +++ b/fs/smb/server/transport_ipc.c @@ -263,10 +263,16 @@ static void ipc_msg_handle_free(int handle) static int handle_response(int type, void *payload, size_t sz) { - unsigned int handle = 
*(unsigned int *)payload; + unsigned int handle; struct ipc_msg_table_entry *entry; int ret = 0; + /* Prevent 4-byte read beyond declared payload size */ + if (sz < sizeof(unsigned int)) + return -EINVAL; + + handle = *(unsigned int *)payload; + ipc_update_last_active(); down_read(&ipc_msg_table_lock); hash_for_each_possible(ipc_msg_table, entry, ipc_table_hlist, handle) { diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c index a201c5871a77..7d86553fcc7c 100644 --- a/fs/smb/server/transport_rdma.c +++ b/fs/smb/server/transport_rdma.c @@ -219,6 +219,7 @@ static void smb_direct_disconnect_wake_up_all(struct smbdirect_socket *sc) * in order to notice the broken connection. */ wake_up_all(&sc->status_wait); + wake_up_all(&sc->send_io.lcredits.wait_queue); wake_up_all(&sc->send_io.credits.wait_queue); wake_up_all(&sc->send_io.pending.zero_wait_queue); wake_up_all(&sc->recv_io.reassembly.wait_queue); @@ -417,9 +418,6 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id) sc->ib.dev = sc->rdma.cm_id->device; - INIT_WORK(&sc->recv_io.posted.refill_work, - smb_direct_post_recv_credits); - INIT_WORK(&sc->idle.immediate_work, smb_direct_send_immediate_work); INIT_DELAYED_WORK(&sc->idle.timer_work, smb_direct_idle_connection_timer); conn = ksmbd_conn_alloc(); @@ -450,11 +448,10 @@ static void free_transport(struct smb_direct_transport *t) struct smbdirect_recv_io *recvmsg; disable_work_sync(&sc->disconnect_work); - if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING) { + if (sc->status < SMBDIRECT_SOCKET_DISCONNECTING) smb_direct_disconnect_rdma_work(&sc->disconnect_work); - wait_event_interruptible(sc->status_wait, - sc->status == SMBDIRECT_SOCKET_DISCONNECTED); - } + if (sc->status < SMBDIRECT_SOCKET_DISCONNECTED) + wait_event(sc->status_wait, sc->status == SMBDIRECT_SOCKET_DISCONNECTED); /* * Wake up all waiters in all wait queues @@ -469,9 +466,11 @@ static void free_transport(struct smb_direct_transport *t) disable_delayed_work_sync(&sc->idle.timer_work); disable_work_sync(&sc->idle.immediate_work); + if (sc->rdma.cm_id) + rdma_lock_handler(sc->rdma.cm_id); + if (sc->ib.qp) { ib_drain_qp(sc->ib.qp); - ib_mr_pool_destroy(sc->ib.qp, &sc->ib.qp->rdma_mrs); sc->ib.qp = NULL; rdma_destroy_qp(sc->rdma.cm_id); } @@ -498,8 +497,10 @@ static void free_transport(struct smb_direct_transport *t) ib_free_cq(sc->ib.recv_cq); if (sc->ib.pd) ib_dealloc_pd(sc->ib.pd); - if (sc->rdma.cm_id) + if (sc->rdma.cm_id) { + rdma_unlock_handler(sc->rdma.cm_id); rdma_destroy_id(sc->rdma.cm_id); + } smb_direct_destroy_pools(sc); ksmbd_conn_free(KSMBD_TRANS(t)->conn); @@ -524,6 +525,12 @@ static void smb_direct_free_sendmsg(struct smbdirect_socket *sc, { int i; + /* + * The list needs to be empty! + * The caller should take care of it. 
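The transport_ipc.c hunk above guards the 4-byte handle read with a size check so a short netlink payload can't be read past its end. The same validate-then-read shape, standalone (memcpy additionally sidesteps alignment traps that the cast in the driver tolerates):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Reject payloads too small to contain the leading handle
 * before performing the typed read. */
static int handle_response(const void *payload, size_t sz)
{
	unsigned int handle;

	if (sz < sizeof(handle))
		return -EINVAL;

	memcpy(&handle, payload, sizeof(handle));
	printf("handle %u\n", handle);
	return 0;
}

int main(void)
{
	unsigned int msg = 7;

	printf("short: %d\n", handle_response(&msg, 2));	/* -EINVAL */
	printf("ok:    %d\n", handle_response(&msg, sizeof(msg)));
	return 0;
}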
+ */ + WARN_ON_ONCE(!list_empty(&msg->sibling_list)); + if (msg->num_sge > 0) { ib_dma_unmap_single(sc->ib.dev, msg->sge[0].addr, msg->sge[0].length, @@ -909,9 +916,9 @@ static void smb_direct_post_recv_credits(struct work_struct *work) static void send_done(struct ib_cq *cq, struct ib_wc *wc) { - struct smbdirect_send_io *sendmsg, *sibling; + struct smbdirect_send_io *sendmsg, *sibling, *next; struct smbdirect_socket *sc; - struct list_head *pos, *prev, *end; + int lcredits = 0; sendmsg = container_of(wc->wr_cqe, struct smbdirect_send_io, cqe); sc = sendmsg->socket; @@ -920,27 +927,31 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc) ib_wc_status_msg(wc->status), wc->status, wc->opcode); + /* + * Free possible siblings and then the main send_io + */ + list_for_each_entry_safe(sibling, next, &sendmsg->sibling_list, sibling_list) { + list_del_init(&sibling->sibling_list); + smb_direct_free_sendmsg(sc, sibling); + lcredits += 1; + } + /* Note this frees wc->wr_cqe, but not wc */ + smb_direct_free_sendmsg(sc, sendmsg); + lcredits += 1; + if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) { pr_err("Send error. status='%s (%d)', opcode=%d\n", ib_wc_status_msg(wc->status), wc->status, wc->opcode); smb_direct_disconnect_rdma_connection(sc); + return; } + atomic_add(lcredits, &sc->send_io.lcredits.count); + wake_up(&sc->send_io.lcredits.wait_queue); + if (atomic_dec_and_test(&sc->send_io.pending.count)) wake_up(&sc->send_io.pending.zero_wait_queue); - - /* iterate and free the list of messages in reverse. the list's head - * is invalid. - */ - for (pos = &sendmsg->sibling_list, prev = pos->prev, end = sendmsg->sibling_list.next; - prev != end; pos = prev, prev = prev->prev) { - sibling = container_of(pos, struct smbdirect_send_io, sibling_list); - smb_direct_free_sendmsg(sc, sibling); - } - - sibling = container_of(pos, struct smbdirect_send_io, sibling_list); - smb_direct_free_sendmsg(sc, sibling); } static int manage_credits_prior_sending(struct smbdirect_socket *sc) @@ -988,8 +999,6 @@ static int smb_direct_post_send(struct smbdirect_socket *sc, ret = ib_post_send(sc->ib.qp, wr, NULL); if (ret) { pr_err("failed to post send: %d\n", ret); - if (atomic_dec_and_test(&sc->send_io.pending.count)) - wake_up(&sc->send_io.pending.zero_wait_queue); smb_direct_disconnect_rdma_connection(sc); } return ret; @@ -1032,19 +1041,29 @@ static int smb_direct_flush_send_list(struct smbdirect_socket *sc, last->wr.send_flags = IB_SEND_SIGNALED; last->wr.wr_cqe = &last->cqe; + /* + * Remove last from send_ctx->msg_list + * and splice the rest of send_ctx->msg_list + * to last->sibling_list. + * + * send_ctx->msg_list is a valid empty list + * at the end. 
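+	 *
+	 * On success send_done() frees 'last' and all of its siblings
+	 * once the signaled WR completes.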
+ */ + list_del_init(&last->sibling_list); + list_splice_tail_init(&send_ctx->msg_list, &last->sibling_list); + send_ctx->wr_cnt = 0; + ret = smb_direct_post_send(sc, &first->wr); - if (!ret) { - smb_direct_send_ctx_init(send_ctx, - send_ctx->need_invalidate_rkey, - send_ctx->remote_key); - } else { - atomic_add(send_ctx->wr_cnt, &sc->send_io.credits.count); - wake_up(&sc->send_io.credits.wait_queue); - list_for_each_entry_safe(first, last, &send_ctx->msg_list, - sibling_list) { - smb_direct_free_sendmsg(sc, first); + if (ret) { + struct smbdirect_send_io *sibling, *next; + + list_for_each_entry_safe(sibling, next, &last->sibling_list, sibling_list) { + list_del_init(&sibling->sibling_list); + smb_direct_free_sendmsg(sc, sibling); } + smb_direct_free_sendmsg(sc, last); } + return ret; } @@ -1070,6 +1089,23 @@ static int wait_for_credits(struct smbdirect_socket *sc, } while (true); } +static int wait_for_send_lcredit(struct smbdirect_socket *sc, + struct smbdirect_send_batch *send_ctx) +{ + if (send_ctx && (atomic_read(&sc->send_io.lcredits.count) <= 1)) { + int ret; + + ret = smb_direct_flush_send_list(sc, send_ctx, false); + if (ret) + return ret; + } + + return wait_for_credits(sc, + &sc->send_io.lcredits.wait_queue, + &sc->send_io.lcredits.count, + 1); +} + static int wait_for_send_credits(struct smbdirect_socket *sc, struct smbdirect_send_batch *send_ctx) { @@ -1257,9 +1293,13 @@ static int smb_direct_post_send_data(struct smbdirect_socket *sc, int data_length; struct scatterlist sg[SMBDIRECT_SEND_IO_MAX_SGE - 1]; + ret = wait_for_send_lcredit(sc, send_ctx); + if (ret) + goto lcredit_failed; + ret = wait_for_send_credits(sc, send_ctx); if (ret) - return ret; + goto credit_failed; data_length = 0; for (i = 0; i < niov; i++) @@ -1267,10 +1307,8 @@ static int smb_direct_post_send_data(struct smbdirect_socket *sc, ret = smb_direct_create_header(sc, data_length, remaining_data_length, &msg); - if (ret) { - atomic_inc(&sc->send_io.credits.count); - return ret; - } + if (ret) + goto header_failed; for (i = 0; i < niov; i++) { struct ib_sge *sge; @@ -1308,7 +1346,11 @@ static int smb_direct_post_send_data(struct smbdirect_socket *sc, return 0; err: smb_direct_free_sendmsg(sc, msg); +header_failed: atomic_inc(&sc->send_io.credits.count); +credit_failed: + atomic_inc(&sc->send_io.lcredits.count); +lcredit_failed: return ret; } @@ -1687,10 +1729,10 @@ static int smb_direct_cm_handler(struct rdma_cm_id *cm_id, } case RDMA_CM_EVENT_DEVICE_REMOVAL: case RDMA_CM_EVENT_DISCONNECTED: { - ib_drain_qp(sc->ib.qp); - sc->status = SMBDIRECT_SOCKET_DISCONNECTED; smb_direct_disconnect_rdma_work(&sc->disconnect_work); + if (sc->ib.qp) + ib_drain_qp(sc->ib.qp); break; } case RDMA_CM_EVENT_CONNECT_ERROR: { @@ -1864,27 +1906,17 @@ static int smb_direct_prepare_negotiation(struct smbdirect_socket *sc) goto out_err; } - smb_direct_post_recv_credits(&sc->recv_io.posted.refill_work); return 0; out_err: put_recvmsg(sc, recvmsg); return ret; } -static unsigned int smb_direct_get_max_fr_pages(struct smbdirect_socket *sc) -{ - return min_t(unsigned int, - sc->ib.dev->attrs.max_fast_reg_page_list_len, - 256); -} - -static int smb_direct_init_params(struct smbdirect_socket *sc, - struct ib_qp_cap *cap) +static int smb_direct_init_params(struct smbdirect_socket *sc) { struct smbdirect_socket_parameters *sp = &sc->parameters; - struct ib_device *device = sc->ib.dev; - int max_send_sges, max_rw_wrs, max_send_wrs; - unsigned int max_sge_per_wr, wrs_per_credit; + int max_send_sges; + unsigned int maxpages; /* need 3 more sge. 
because a SMB_DIRECT header, SMB2 header, * SMB2 response could be mapped. @@ -1895,67 +1927,20 @@ static int smb_direct_init_params(struct smbdirect_socket *sc, return -EINVAL; } - /* Calculate the number of work requests for RDMA R/W. - * The maximum number of pages which can be registered - * with one Memory region can be transferred with one - * R/W credit. And at least 4 work requests for each credit - * are needed for MR registration, RDMA R/W, local & remote - * MR invalidation. - */ - sc->rw_io.credits.num_pages = smb_direct_get_max_fr_pages(sc); - sc->rw_io.credits.max = DIV_ROUND_UP(sp->max_read_write_size, - (sc->rw_io.credits.num_pages - 1) * - PAGE_SIZE); + atomic_set(&sc->send_io.lcredits.count, sp->send_credit_target); - max_sge_per_wr = min_t(unsigned int, device->attrs.max_send_sge, - device->attrs.max_sge_rd); - max_sge_per_wr = max_t(unsigned int, max_sge_per_wr, - max_send_sges); - wrs_per_credit = max_t(unsigned int, 4, - DIV_ROUND_UP(sc->rw_io.credits.num_pages, - max_sge_per_wr) + 1); - max_rw_wrs = sc->rw_io.credits.max * wrs_per_credit; - - max_send_wrs = sp->send_credit_target + max_rw_wrs; - if (max_send_wrs > device->attrs.max_cqe || - max_send_wrs > device->attrs.max_qp_wr) { - pr_err("consider lowering send_credit_target = %d\n", - sp->send_credit_target); - pr_err("Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n", - device->attrs.max_cqe, device->attrs.max_qp_wr); - return -EINVAL; - } - - if (sp->recv_credit_max > device->attrs.max_cqe || - sp->recv_credit_max > device->attrs.max_qp_wr) { - pr_err("consider lowering receive_credit_max = %d\n", - sp->recv_credit_max); - pr_err("Possible CQE overrun, device reporting max_cpe %d max_qp_wr %d\n", - device->attrs.max_cqe, device->attrs.max_qp_wr); - return -EINVAL; - } - - if (device->attrs.max_send_sge < SMBDIRECT_SEND_IO_MAX_SGE) { - pr_err("warning: device max_send_sge = %d too small\n", - device->attrs.max_send_sge); - return -EINVAL; - } - if (device->attrs.max_recv_sge < SMBDIRECT_RECV_IO_MAX_SGE) { - pr_err("warning: device max_recv_sge = %d too small\n", - device->attrs.max_recv_sge); - return -EINVAL; - } + maxpages = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE); + sc->rw_io.credits.max = rdma_rw_mr_factor(sc->ib.dev, + sc->rdma.cm_id->port_num, + maxpages); + sc->rw_io.credits.num_pages = DIV_ROUND_UP(maxpages, sc->rw_io.credits.max); + /* add one extra in order to handle unaligned pages */ + sc->rw_io.credits.max += 1; sc->recv_io.credits.target = 1; atomic_set(&sc->rw_io.credits.count, sc->rw_io.credits.max); - cap->max_send_wr = max_send_wrs; - cap->max_recv_wr = sp->recv_credit_max; - cap->max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE; - cap->max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE; - cap->max_inline_data = 0; - cap->max_rdma_ctxs = sc->rw_io.credits.max; return 0; } @@ -2029,13 +2014,129 @@ static int smb_direct_create_pools(struct smbdirect_socket *sc) return -ENOMEM; } -static int smb_direct_create_qpair(struct smbdirect_socket *sc, - struct ib_qp_cap *cap) +static u32 smb_direct_rdma_rw_send_wrs(struct ib_device *dev, const struct ib_qp_init_attr *attr) +{ + /* + * This could be split out of rdma_rw_init_qp() + * and be a helper function next to rdma_rw_mr_factor() + * + * We can't check unlikely(rdma_rw_force_mr) here, + * but that is most likely 0 anyway. + */ + u32 factor; + + WARN_ON_ONCE(attr->port_num == 0); + + /* + * Each context needs at least one RDMA READ or WRITE WR. 
+ * + * For some hardware we might need more, eventually we should ask the + * HCA driver for a multiplier here. + */ + factor = 1; + + /* + * If the device needs MRs to perform RDMA READ or WRITE operations, + * we'll need two additional MRs for the registrations and the + * invalidation. + */ + if (rdma_protocol_iwarp(dev, attr->port_num) || dev->attrs.max_sgl_rd) + factor += 2; /* inv + reg */ + + return factor * attr->cap.max_rdma_ctxs; +} + +static int smb_direct_create_qpair(struct smbdirect_socket *sc) { struct smbdirect_socket_parameters *sp = &sc->parameters; int ret; + struct ib_qp_cap qp_cap; struct ib_qp_init_attr qp_attr; - int pages_per_rw; + u32 max_send_wr; + u32 rdma_send_wr; + + /* + * Note that {rdma,ib}_create_qp() will call + * rdma_rw_init_qp() if cap->max_rdma_ctxs is not 0. + * It will adjust cap->max_send_wr to the required + * number of additional WRs for the RDMA RW operations. + * It will cap cap->max_send_wr to the device limit. + * + * +1 for ib_drain_qp + */ + qp_cap.max_send_wr = sp->send_credit_target + 1; + qp_cap.max_recv_wr = sp->recv_credit_max + 1; + qp_cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE; + qp_cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE; + qp_cap.max_inline_data = 0; + qp_cap.max_rdma_ctxs = sc->rw_io.credits.max; + + /* + * Find out the number of max_send_wr + * after rdma_rw_init_qp() adjusted it. + * + * We only do it on a temporary variable, + * as rdma_create_qp() will trigger + * rdma_rw_init_qp() again. + */ + memset(&qp_attr, 0, sizeof(qp_attr)); + qp_attr.cap = qp_cap; + qp_attr.port_num = sc->rdma.cm_id->port_num; + rdma_send_wr = smb_direct_rdma_rw_send_wrs(sc->ib.dev, &qp_attr); + max_send_wr = qp_cap.max_send_wr + rdma_send_wr; + + if (qp_cap.max_send_wr > sc->ib.dev->attrs.max_cqe || + qp_cap.max_send_wr > sc->ib.dev->attrs.max_qp_wr) { + pr_err("Possible CQE overrun: max_send_wr %d\n", + qp_cap.max_send_wr); + pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n", + IB_DEVICE_NAME_MAX, + sc->ib.dev->name, + sc->ib.dev->attrs.max_cqe, + sc->ib.dev->attrs.max_qp_wr); + pr_err("consider lowering send_credit_target = %d\n", + sp->send_credit_target); + return -EINVAL; + } + + if (qp_cap.max_rdma_ctxs && + (max_send_wr >= sc->ib.dev->attrs.max_cqe || + max_send_wr >= sc->ib.dev->attrs.max_qp_wr)) { + pr_err("Possible CQE overrun: rdma_send_wr %d + max_send_wr %d = %d\n", + rdma_send_wr, qp_cap.max_send_wr, max_send_wr); + pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n", + IB_DEVICE_NAME_MAX, + sc->ib.dev->name, + sc->ib.dev->attrs.max_cqe, + sc->ib.dev->attrs.max_qp_wr); + pr_err("consider lowering send_credit_target = %d, max_rdma_ctxs = %d\n", + sp->send_credit_target, qp_cap.max_rdma_ctxs); + return -EINVAL; + } + + if (qp_cap.max_recv_wr > sc->ib.dev->attrs.max_cqe || + qp_cap.max_recv_wr > sc->ib.dev->attrs.max_qp_wr) { + pr_err("Possible CQE overrun: max_recv_wr %d\n", + qp_cap.max_recv_wr); + pr_err("device %.*s reporting max_cqe %d max_qp_wr %d\n", + IB_DEVICE_NAME_MAX, + sc->ib.dev->name, + sc->ib.dev->attrs.max_cqe, + sc->ib.dev->attrs.max_qp_wr); + pr_err("consider lowering receive_credit_max = %d\n", + sp->recv_credit_max); + return -EINVAL; + } + + if (qp_cap.max_send_sge > sc->ib.dev->attrs.max_send_sge || + qp_cap.max_recv_sge > sc->ib.dev->attrs.max_recv_sge) { + pr_err("device %.*s max_send_sge/max_recv_sge = %d/%d too small\n", + IB_DEVICE_NAME_MAX, + sc->ib.dev->name, + sc->ib.dev->attrs.max_send_sge, + sc->ib.dev->attrs.max_recv_sge); + return -EINVAL; + } sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0); if 
(IS_ERR(sc->ib.pd)) { @@ -2046,8 +2147,7 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc, } sc->ib.send_cq = ib_alloc_cq_any(sc->ib.dev, sc, - sp->send_credit_target + - cap->max_rdma_ctxs, + max_send_wr, IB_POLL_WORKQUEUE); if (IS_ERR(sc->ib.send_cq)) { pr_err("Can't create RDMA send CQ\n"); @@ -2057,7 +2157,7 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc, } sc->ib.recv_cq = ib_alloc_cq_any(sc->ib.dev, sc, - sp->recv_credit_max, + qp_cap.max_recv_wr, IB_POLL_WORKQUEUE); if (IS_ERR(sc->ib.recv_cq)) { pr_err("Can't create RDMA recv CQ\n"); @@ -2066,10 +2166,18 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc, goto err; } + /* + * We reset completely here! + * As the above use was just temporary + * to calc max_send_wr and rdma_send_wr. + * + * rdma_create_qp() will trigger rdma_rw_init_qp() + * again if max_rdma_ctxs is not 0. + */ memset(&qp_attr, 0, sizeof(qp_attr)); qp_attr.event_handler = smb_direct_qpair_handler; qp_attr.qp_context = sc; - qp_attr.cap = *cap; + qp_attr.cap = qp_cap; qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; qp_attr.qp_type = IB_QPT_RC; qp_attr.send_cq = sc->ib.send_cq; @@ -2085,18 +2193,6 @@ static int smb_direct_create_qpair(struct smbdirect_socket *sc, sc->ib.qp = sc->rdma.cm_id->qp; sc->rdma.cm_id->event_handler = smb_direct_cm_handler; - pages_per_rw = DIV_ROUND_UP(sp->max_read_write_size, PAGE_SIZE) + 1; - if (pages_per_rw > sc->ib.dev->attrs.max_sgl_rd) { - ret = ib_mr_pool_init(sc->ib.qp, &sc->ib.qp->rdma_mrs, - sc->rw_io.credits.max, IB_MR_TYPE_MEM_REG, - sc->rw_io.credits.num_pages, 0); - if (ret) { - pr_err("failed to init mr pool count %zu pages %zu\n", - sc->rw_io.credits.max, sc->rw_io.credits.num_pages); - goto err; - } - } - return 0; err: if (sc->ib.qp) { @@ -2154,8 +2250,8 @@ static int smb_direct_prepare(struct ksmbd_transport *t) return -ECONNABORTED; ret = smb_direct_check_recvmsg(recvmsg); - if (ret == -ECONNABORTED) - goto out; + if (ret) + goto put; req = (struct smbdirect_negotiate_req *)recvmsg->packet; sp->max_recv_size = min_t(int, sp->max_recv_size, @@ -2170,23 +2266,46 @@ static int smb_direct_prepare(struct ksmbd_transport *t) sc->recv_io.credits.target = min_t(u16, sc->recv_io.credits.target, sp->recv_credit_max); sc->recv_io.credits.target = max_t(u16, sc->recv_io.credits.target, 1); - ret = smb_direct_send_negotiate_response(sc, ret); -out: +put: spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags); sc->recv_io.reassembly.queue_length--; list_del(&recvmsg->list); spin_unlock_irqrestore(&sc->recv_io.reassembly.lock, flags); put_recvmsg(sc, recvmsg); + if (ret == -ECONNABORTED) + return ret; + + if (ret) + goto respond; + + /* + * We negotiated with success, so we need to refill the recv queue. + * We do that with sc->idle.immediate_work still being disabled + * via smbdirect_socket_init(), so that queue_work(sc->workqueue, + * &sc->idle.immediate_work) in smb_direct_post_recv_credits() + * is a no-op. + * + * The message that grants the credits to the client is + * the negotiate response. 
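+	 *
+	 * Posting the receive buffers before sending that response ensures
+	 * the client cannot spend a granted credit before a buffer is ready
+	 * to receive the request carried by it.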
+ */ + INIT_WORK(&sc->recv_io.posted.refill_work, smb_direct_post_recv_credits); + smb_direct_post_recv_credits(&sc->recv_io.posted.refill_work); + if (unlikely(sc->first_error)) + return sc->first_error; + INIT_WORK(&sc->idle.immediate_work, smb_direct_send_immediate_work); + +respond: + ret = smb_direct_send_negotiate_response(sc, ret); + return ret; } static int smb_direct_connect(struct smbdirect_socket *sc) { - struct ib_qp_cap qp_cap; int ret; - ret = smb_direct_init_params(sc, &qp_cap); + ret = smb_direct_init_params(sc); if (ret) { pr_err("Can't configure RDMA parameters\n"); return ret; @@ -2198,7 +2317,7 @@ static int smb_direct_connect(struct smbdirect_socket *sc) return ret; } - ret = smb_direct_create_qpair(sc, &qp_cap); + ret = smb_direct_create_qpair(sc); if (ret) { pr_err("Can't accept RDMA client: %d\n", ret); return ret; diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c index 2d78e94072a0..e142bac4f9f8 100644 --- a/fs/sysfs/group.c +++ b/fs/sysfs/group.c @@ -498,17 +498,26 @@ int compat_only_sysfs_link_entry_to_kobj(struct kobject *kobj, } EXPORT_SYMBOL_GPL(compat_only_sysfs_link_entry_to_kobj); -static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn, +static int sysfs_group_attrs_change_owner(struct kobject *kobj, + struct kernfs_node *grp_kn, const struct attribute_group *grp, struct iattr *newattrs) { struct kernfs_node *kn; - int error; + int error, i; + umode_t mode; if (grp->attrs) { struct attribute *const *attr; - for (attr = grp->attrs; *attr; attr++) { + for (i = 0, attr = grp->attrs; *attr; i++, attr++) { + if (grp->is_visible) { + mode = grp->is_visible(kobj, *attr, i); + if (mode & SYSFS_GROUP_INVISIBLE) + break; + if (!mode) + continue; + } kn = kernfs_find_and_get(grp_kn, (*attr)->name); if (!kn) return -ENOENT; @@ -523,7 +532,14 @@ static int sysfs_group_attrs_change_owner(struct kernfs_node *grp_kn, if (grp->bin_attrs) { const struct bin_attribute *const *bin_attr; - for (bin_attr = grp->bin_attrs; *bin_attr; bin_attr++) { + for (i = 0, bin_attr = grp->bin_attrs; *bin_attr; i++, bin_attr++) { + if (grp->is_bin_visible) { + mode = grp->is_bin_visible(kobj, *bin_attr, i); + if (mode & SYSFS_GROUP_INVISIBLE) + break; + if (!mode) + continue; + } kn = kernfs_find_and_get(grp_kn, (*bin_attr)->attr.name); if (!kn) return -ENOENT; @@ -573,7 +589,7 @@ int sysfs_group_change_owner(struct kobject *kobj, error = kernfs_setattr(grp_kn, &newattrs); if (!error) - error = sysfs_group_attrs_change_owner(grp_kn, grp, &newattrs); + error = sysfs_group_attrs_change_owner(kobj, grp_kn, grp, &newattrs); kernfs_put(grp_kn); diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig index 8930d5254e1d..b99da294e9a3 100644 --- a/fs/xfs/Kconfig +++ b/fs/xfs/Kconfig @@ -119,6 +119,15 @@ config XFS_RT See the xfs man page in section 5 for additional information. + This option is mandatory to support zoned block devices. For these + devices, the realtime subvolume must be backed by a zoned block + device and a regular block device used as the main device (for + metadata). If the zoned block device is a host-managed SMR hard-disk + containing conventional zones at the beginning of its address space, + XFS will use the disk conventional zones as the main device and the + remaining sequential write required zones as the backing storage for + the realtime subvolume. + If unsure, say N. 
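+
+	  For illustration only (device paths are placeholders), such a
+	  filesystem is typically created and mounted with the zoned device
+	  given as the realtime device, e.g.
+	  "mkfs.xfs -r rtdev=/dev/zoned /dev/conv" followed by
+	  "mount -o rtdev=/dev/zoned /dev/conv /mnt".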
config XFS_DRAIN_INTENTS @@ -156,7 +165,7 @@ config XFS_ONLINE_SCRUB_STATS bool "XFS online metadata check usage data collection" default y depends on XFS_ONLINE_SCRUB - select DEBUG_FS + depends on DEBUG_FS help If you say Y here, the kernel will gather usage data about the online metadata check subsystem. This includes the number diff --git a/fs/xfs/scrub/nlinks.c b/fs/xfs/scrub/nlinks.c index 26721fab5cab..091c79e432e5 100644 --- a/fs/xfs/scrub/nlinks.c +++ b/fs/xfs/scrub/nlinks.c @@ -376,6 +376,36 @@ xchk_nlinks_collect_pptr( return error; } +static uint +xchk_nlinks_ilock_dir( + struct xfs_inode *ip) +{ + uint lock_mode = XFS_ILOCK_SHARED; + + /* + * We're going to scan the directory entries, so we must be ready to + * pull the data fork mappings into memory if they aren't already. + */ + if (xfs_need_iread_extents(&ip->i_df)) + lock_mode = XFS_ILOCK_EXCL; + + /* + * We're going to scan the parent pointers, so we must be ready to + * pull the attr fork mappings into memory if they aren't already. + */ + if (xfs_has_parent(ip->i_mount) && xfs_inode_has_attr_fork(ip) && + xfs_need_iread_extents(&ip->i_af)) + lock_mode = XFS_ILOCK_EXCL; + + /* + * Take the IOLOCK so that other threads cannot start a directory + * update while we're scanning. + */ + lock_mode |= XFS_IOLOCK_SHARED; + xfs_ilock(ip, lock_mode); + return lock_mode; +} + /* Walk a directory to bump the observed link counts of the children. */ STATIC int xchk_nlinks_collect_dir( @@ -394,8 +424,7 @@ xchk_nlinks_collect_dir( return 0; /* Prevent anyone from changing this directory while we walk it. */ - xfs_ilock(dp, XFS_IOLOCK_SHARED); - lock_mode = xfs_ilock_data_map_shared(dp); + lock_mode = xchk_nlinks_ilock_dir(dp); /* * The dotdot entry of an unlinked directory still points to the last @@ -452,7 +481,6 @@ xchk_nlinks_collect_dir( xchk_iscan_abort(&xnc->collect_iscan); out_unlock: xfs_iunlock(dp, lock_mode); - xfs_iunlock(dp, XFS_IOLOCK_SHARED); return error; } diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 773d959965dc..47edf3041631 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -1751,7 +1751,7 @@ xfs_init_buftarg( const char *descr) { /* The maximum size of the buftarg is only known once the sb is read. */ - btp->bt_nr_sectors = (xfs_daddr_t)-1; + btp->bt_nr_sectors = XFS_BUF_DADDR_MAX; /* Set up device logical sector size mask */ btp->bt_logical_sectorsize = logical_sectorsize; diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 8fa7bdf59c91..e25cd2a160f3 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -22,6 +22,7 @@ extern struct kmem_cache *xfs_buf_cache; */ struct xfs_buf; +#define XFS_BUF_DADDR_MAX ((xfs_daddr_t) S64_MAX) #define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL)) #define XBF_READ (1u << 0) /* buffer intended for reading from device */ diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index f046d1215b04..b871dfde372b 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -236,7 +236,6 @@ typedef struct xfs_mount { bool m_update_sb; /* sb needs update in mount */ unsigned int m_max_open_zones; unsigned int m_zonegc_low_space; - struct xfs_mru_cache *m_zone_cache; /* Inode to open zone cache */ /* max_atomic_write mount option value */ unsigned long long m_awu_max_bytes; diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index e85a156dc17d..1067ebb3b001 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -102,7 +102,7 @@ static const struct constant_table dax_param_enums[] = { * Table driven mount option parser. 
*/ enum { - Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, + Op_deprecated, Opt_logbufs, Opt_logbsize, Opt_logdev, Opt_rtdev, Opt_wsync, Opt_noalign, Opt_swalloc, Opt_sunit, Opt_swidth, Opt_nouuid, Opt_grpid, Opt_nogrpid, Opt_bsdgroups, Opt_sysvgroups, Opt_allocsize, Opt_norecovery, Opt_inode64, Opt_inode32, @@ -114,7 +114,21 @@ enum { Opt_lifetime, Opt_nolifetime, Opt_max_atomic_write, }; +#define fsparam_dead(NAME) \ + __fsparam(NULL, (NAME), Op_deprecated, fs_param_deprecated, NULL) + static const struct fs_parameter_spec xfs_fs_parameters[] = { + /* + * These mount options were supposed to be deprecated in September 2025 + * but the deprecation warning was buggy, so not all users were + * notified. The deprecation is now obnoxiously loud and postponed to + * September 2030. + */ + fsparam_dead("attr2"), + fsparam_dead("noattr2"), + fsparam_dead("ikeep"), + fsparam_dead("noikeep"), + fsparam_u32("logbufs", Opt_logbufs), fsparam_string("logbsize", Opt_logbsize), fsparam_string("logdev", Opt_logdev), @@ -786,6 +800,12 @@ xfs_fs_evict_inode( truncate_inode_pages_final(&inode->i_data); clear_inode(inode); + + if (IS_ENABLED(CONFIG_XFS_RT) && + S_ISREG(inode->i_mode) && inode->i_private) { + xfs_open_zone_put(inode->i_private); + inode->i_private = NULL; + } } static void @@ -1373,16 +1393,25 @@ suffix_kstrtoull( static inline void xfs_fs_warn_deprecated( struct fs_context *fc, - struct fs_parameter *param, - uint64_t flag, - bool value) + struct fs_parameter *param) { - /* Don't print the warning if reconfiguring and current mount point - * already had the flag set + /* + * Always warn about someone passing in a deprecated mount option. + * Previously we wouldn't print the warning if we were reconfiguring + * and current mount point already had the flag set, but that was not + * the right thing to do. + * + * Many distributions mount the root filesystem with no options in the + * initramfs and rely on mount -a to remount the root fs with the + * options in fstab. However, the old behavior meant that there would + * never be a warning about deprecated mount options for the root fs in + * /etc/fstab. On a single-fs system, that means no warning at all. + * + * Compounding this problem are distribution scripts that copy + * /proc/mounts to fstab, which means that we can't remove mount + * options unless we're 100% sure they have only ever been advertised + * in /proc/mounts in response to explicitly provided mount options. 
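+	 *
+	 * Hence fsparam_dead(): keep accepting these options, warn loudly
+	 * every time they are seen, and otherwise ignore them.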
*/ - if ((fc->purpose & FS_CONTEXT_FOR_RECONFIGURE) && - !!(XFS_M(fc->root->d_sb)->m_features & flag) == value) - return; xfs_warn(fc->s_fs_info, "%s mount option is deprecated.", param->key); } @@ -1408,6 +1437,9 @@ xfs_fs_parse_param( return opt; switch (opt) { + case Op_deprecated: + xfs_fs_warn_deprecated(fc, param); + return 0; case Opt_logbufs: parsing_mp->m_logbufs = result.uint_32; return 0; @@ -1528,7 +1560,6 @@ xfs_fs_parse_param( xfs_mount_set_dax_mode(parsing_mp, result.uint_32); return 0; #endif - /* Following mount options will be removed in September 2025 */ case Opt_max_open_zones: parsing_mp->m_max_open_zones = result.uint_32; return 0; @@ -2221,7 +2252,7 @@ xfs_init_fs_context( struct xfs_mount *mp; int i; - mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL | __GFP_NOFAIL); + mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL); if (!mp) return -ENOMEM; diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c index 1147bacb2da8..23cdab4515bb 100644 --- a/fs/xfs/xfs_zone_alloc.c +++ b/fs/xfs/xfs_zone_alloc.c @@ -26,14 +26,22 @@ #include "xfs_trace.h" #include "xfs_mru_cache.h" +static void +xfs_open_zone_free_rcu( + struct callback_head *cb) +{ + struct xfs_open_zone *oz = container_of(cb, typeof(*oz), oz_rcu); + + xfs_rtgroup_rele(oz->oz_rtg); + kfree(oz); +} + void xfs_open_zone_put( struct xfs_open_zone *oz) { - if (atomic_dec_and_test(&oz->oz_ref)) { - xfs_rtgroup_rele(oz->oz_rtg); - kfree(oz); - } + if (atomic_dec_and_test(&oz->oz_ref)) + call_rcu(&oz->oz_rcu, xfs_open_zone_free_rcu); } static inline uint32_t @@ -614,14 +622,25 @@ static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip) } /* - * Try to pack inodes that are written back after they were closed tight instead - * of trying to open new zones for them or spread them to the least recently - * used zone. This optimizes the data layout for workloads that untar or copy - * a lot of small files. Right now this does not separate multiple such + * Try to tightly pack small files that are written back after they were closed + * instead of trying to open new zones for them or spread them to the least + * recently used zone. This optimizes the data layout for workloads that untar + * or copy a lot of small files. Right now this does not separate multiple such * streams. */ static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip) { + struct xfs_mount *mp = ip->i_mount; + size_t zone_capacity = + XFS_FSB_TO_B(mp, mp->m_groups[XG_TYPE_RTG].blocks); + + /* + * Do not pack writes for files that are already using a full zone, to + * avoid fragmentation. + */ + if (i_size_read(VFS_I(ip)) >= zone_capacity) + return false; + return !inode_is_open_for_write(VFS_I(ip)) && !(ip->i_diflags & XFS_DIFLAG_APPEND); } @@ -745,98 +764,55 @@ xfs_mark_rtg_boundary( ioend->io_flags |= IOMAP_IOEND_BOUNDARY; } -/* - * Cache the last zone written to for an inode so that it is considered first - * for subsequent writes. - */ -struct xfs_zone_cache_item { - struct xfs_mru_cache_elem mru; - struct xfs_open_zone *oz; -}; - -static inline struct xfs_zone_cache_item * -xfs_zone_cache_item(struct xfs_mru_cache_elem *mru) -{ - return container_of(mru, struct xfs_zone_cache_item, mru); -} - -static void -xfs_zone_cache_free_func( - void *data, - struct xfs_mru_cache_elem *mru) -{ - struct xfs_zone_cache_item *item = xfs_zone_cache_item(mru); - - xfs_open_zone_put(item->oz); - kfree(item); -} - /* * Check if we have a cached last open zone available for the inode and * if yes return a reference to it.
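 * The lookup runs under rcu_read_lock() and takes the reference with
 * atomic_inc_not_zero(), so a zone whose last reference is already
 * gone is simply skipped.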
*/ static struct xfs_open_zone * -xfs_cached_zone( - struct xfs_mount *mp, - struct xfs_inode *ip) +xfs_get_cached_zone( + struct xfs_inode *ip) { - struct xfs_mru_cache_elem *mru; - struct xfs_open_zone *oz; + struct xfs_open_zone *oz; - mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino); - if (!mru) - return NULL; - oz = xfs_zone_cache_item(mru)->oz; + rcu_read_lock(); + oz = VFS_I(ip)->i_private; if (oz) { /* * GC only steals open zones at mount time, so no GC zones * should end up in the cache. */ ASSERT(!oz->oz_is_gc); - ASSERT(atomic_read(&oz->oz_ref) > 0); - atomic_inc(&oz->oz_ref); + if (!atomic_inc_not_zero(&oz->oz_ref)) + oz = NULL; } - xfs_mru_cache_done(mp->m_zone_cache); + rcu_read_unlock(); + return oz; } /* - * Update the last used zone cache for a given inode. + * Stash our zone in the inode so that it is reused for future allocations. * - * The caller must have a reference on the open zone. + * The open_zone structure will be pinned until either the inode is freed or + * until the cached open zone is replaced with a different one because the + * current one was full when we tried to use it. This means we keep an + * open zone around for as long as any inode that used it for the last + * write is cached, which slightly increases the memory use of cached inodes + * that were ever written to, but significantly simplifies the cached zone + * lookup. Because the open_zone is clearly marked as full when all data + * in the underlying RTG was written, the caching is always safe. */ static void -xfs_zone_cache_create_association( - struct xfs_inode *ip, - struct xfs_open_zone *oz) +xfs_set_cached_zone( + struct xfs_inode *ip, + struct xfs_open_zone *oz) { - struct xfs_mount *mp = ip->i_mount; - struct xfs_zone_cache_item *item = NULL; - struct xfs_mru_cache_elem *mru; + struct xfs_open_zone *old_oz; - ASSERT(atomic_read(&oz->oz_ref) > 0); atomic_inc(&oz->oz_ref); - - mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino); - if (mru) { - /* - * If we have an association already, update it to point to the - * new zone. - */ - item = xfs_zone_cache_item(mru); - xfs_open_zone_put(item->oz); - item->oz = oz; - xfs_mru_cache_done(mp->m_zone_cache); - return; - } - - item = kmalloc(sizeof(*item), GFP_KERNEL); - if (!item) { - xfs_open_zone_put(oz); - return; - } - item->oz = oz; - xfs_mru_cache_insert(mp->m_zone_cache, ip->i_ino, &item->mru); + old_oz = xchg(&VFS_I(ip)->i_private, oz); + if (old_oz) + xfs_open_zone_put(old_oz); } static void @@ -880,15 +856,14 @@ xfs_zone_alloc_and_submit( * the inode is still associated with a zone and use that if so. */ if (!*oz) - *oz = xfs_cached_zone(mp, ip); + *oz = xfs_get_cached_zone(ip); if (!*oz) { select_zone: *oz = xfs_select_zone(mp, write_hint, pack_tight); if (!*oz) goto out_error; - - xfs_zone_cache_create_association(ip, *oz); + xfs_set_cached_zone(ip, *oz); } alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size), @@ -966,6 +941,12 @@ xfs_free_open_zones( xfs_open_zone_put(oz); } spin_unlock(&zi->zi_open_zones_lock); + + /* + * Wait for all open zones to be freed so that they drop the group + * references. + */ + rcu_barrier(); } struct xfs_init_zones { @@ -1279,14 +1260,6 @@ xfs_mount_zones( error = xfs_zone_gc_mount(mp); if (error) goto out_free_zone_info; - - /* - * Set up a mru cache to track inode to open zone for data placement - * purposes. The magic values for group count and life time is the - * same as the defaults for file streams, which seems sane enough.
- */ - xfs_mru_cache_create(&mp->m_zone_cache, mp, - 5000, 10, xfs_zone_cache_free_func); return 0; out_free_zone_info: @@ -1300,5 +1273,4 @@ xfs_unmount_zones( { xfs_zone_gc_unmount(mp); xfs_free_zone_info(mp->m_zone_info); - xfs_mru_cache_destroy(mp->m_zone_cache); } diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c index 064cd1a857a0..109877d9a6bf 100644 --- a/fs/xfs/xfs_zone_gc.c +++ b/fs/xfs/xfs_zone_gc.c @@ -491,21 +491,6 @@ xfs_zone_gc_select_victim( struct xfs_rtgroup *victim_rtg = NULL; unsigned int bucket; - if (xfs_is_shutdown(mp)) - return false; - - if (iter->victim_rtg) - return true; - - /* - * Don't start new work if we are asked to stop or park. - */ - if (kthread_should_stop() || kthread_should_park()) - return false; - - if (!xfs_zoned_need_gc(mp)) - return false; - spin_lock(&zi->zi_used_buckets_lock); for (bucket = 0; bucket < XFS_ZONE_USED_BUCKETS; bucket++) { victim_rtg = xfs_zone_gc_pick_victim_from(mp, bucket); @@ -975,6 +960,27 @@ xfs_zone_gc_reset_zones( } while (next); } +static bool +xfs_zone_gc_should_start_new_work( + struct xfs_zone_gc_data *data) +{ + if (xfs_is_shutdown(data->mp)) + return false; + if (!xfs_zone_gc_space_available(data)) + return false; + + if (!data->iter.victim_rtg) { + if (kthread_should_stop() || kthread_should_park()) + return false; + if (!xfs_zoned_need_gc(data->mp)) + return false; + if (!xfs_zone_gc_select_victim(data)) + return false; + } + + return true; +} + /* * Handle the work to read and write data for GC and to reset the zones, * including handling all completions. @@ -982,7 +988,7 @@ xfs_zone_gc_reset_zones( * Note that the order of the chunks is preserved so that we don't undo the * optimal order established by xfs_zone_gc_query(). */ -static bool +static void xfs_zone_gc_handle_work( struct xfs_zone_gc_data *data) { @@ -996,30 +1002,22 @@ xfs_zone_gc_handle_work( zi->zi_reset_list = NULL; spin_unlock(&zi->zi_reset_list_lock); - if (!xfs_zone_gc_select_victim(data) || - !xfs_zone_gc_space_available(data)) { - if (list_empty(&data->reading) && - list_empty(&data->writing) && - list_empty(&data->resetting) && - !reset_list) - return false; - } - - __set_current_state(TASK_RUNNING); - try_to_freeze(); - - if (reset_list) + if (reset_list) { + set_current_state(TASK_RUNNING); xfs_zone_gc_reset_zones(data, reset_list); + } list_for_each_entry_safe(chunk, next, &data->resetting, entry) { if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) break; + set_current_state(TASK_RUNNING); xfs_zone_gc_finish_reset(chunk); } list_for_each_entry_safe(chunk, next, &data->writing, entry) { if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) break; + set_current_state(TASK_RUNNING); xfs_zone_gc_finish_chunk(chunk); } @@ -1027,15 +1025,18 @@ xfs_zone_gc_handle_work( list_for_each_entry_safe(chunk, next, &data->reading, entry) { if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) break; + set_current_state(TASK_RUNNING); xfs_zone_gc_write_chunk(chunk); } blk_finish_plug(&plug); - blk_start_plug(&plug); - while (xfs_zone_gc_start_chunk(data)) - ; - blk_finish_plug(&plug); - return true; + if (xfs_zone_gc_should_start_new_work(data)) { + set_current_state(TASK_RUNNING); + blk_start_plug(&plug); + while (xfs_zone_gc_start_chunk(data)) + ; + blk_finish_plug(&plug); + } } /* @@ -1059,8 +1060,18 @@ xfs_zoned_gcd( for (;;) { set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE); xfs_set_zonegc_running(mp); - if (xfs_zone_gc_handle_work(data)) + + xfs_zone_gc_handle_work(data); + + /* + * Only sleep if nothing set the state to running. 
Else check for + * work again as someone might have queued up more work and woken + * us in the meantime. + */ + if (get_current_state() == TASK_RUNNING) { + try_to_freeze(); continue; + } if (list_empty(&data->reading) && list_empty(&data->writing) && diff --git a/fs/xfs/xfs_zone_priv.h b/fs/xfs/xfs_zone_priv.h index 35e6de3d25ed..4322e26dd99a 100644 --- a/fs/xfs/xfs_zone_priv.h +++ b/fs/xfs/xfs_zone_priv.h @@ -44,6 +44,8 @@ struct xfs_open_zone { * the life time of an open zone. */ struct xfs_rtgroup *oz_rtg; + + struct rcu_head oz_rcu; }; /* diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index cd7ee4df9045..81e603839c4a 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -338,6 +338,7 @@ struct ffa_mem_region_attributes { * an `struct ffa_mem_region_addr_range`. */ u32 composite_off; + u8 impdef_val[16]; u64 reserved; }; @@ -417,15 +418,31 @@ struct ffa_mem_region { #define CONSTITUENTS_OFFSET(x) \ (offsetof(struct ffa_composite_mem_region, constituents[x])) +#define FFA_EMAD_HAS_IMPDEF_FIELD(version) ((version) >= FFA_VERSION_1_2) +#define FFA_MEM_REGION_HAS_EP_MEM_OFFSET(version) ((version) > FFA_VERSION_1_0) + +static inline u32 ffa_emad_size_get(u32 ffa_version) +{ + u32 sz; + struct ffa_mem_region_attributes *ep_mem_access; + + if (FFA_EMAD_HAS_IMPDEF_FIELD(ffa_version)) + sz = sizeof(*ep_mem_access); + else + sz = sizeof(*ep_mem_access) - sizeof(ep_mem_access->impdef_val); + + return sz; +} + static inline u32 ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version) { - u32 offset = count * sizeof(struct ffa_mem_region_attributes); + u32 offset = count * ffa_emad_size_get(ffa_version); /* * Earlier to v1.1, the endpoint memory descriptor array started at * offset 32(i.e. offset of ep_mem_offset in the current structure) */ - if (ffa_version <= FFA_VERSION_1_0) + if (!FFA_MEM_REGION_HAS_EP_MEM_OFFSET(ffa_version)) offset += offsetof(struct ffa_mem_region, ep_mem_offset); else offset += sizeof(struct ffa_mem_region); diff --git a/include/linux/gpio/regmap.h b/include/linux/gpio/regmap.h index 622a2939ebe0..87983a5f3681 100644 --- a/include/linux/gpio/regmap.h +++ b/include/linux/gpio/regmap.h @@ -38,6 +38,10 @@ struct regmap; * offset to a register/bitmask pair. If not * given the default gpio_regmap_simple_xlate() * is used. + * @fixed_direction_output: + * (Optional) Bitmap representing the fixed direction of + * the GPIO lines. Useful when there are GPIO lines with a + * fixed direction mixed together in the same register. * @drvdata: (Optional) Pointer to driver specific data which is * not used by gpio-remap but is provided "as is" to the * driver callback(s). 
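A minimal sketch of how a driver might fill the new field (the foo_* names, the eight-line layout and the register setup are invented for illustration, not taken from this patch):

#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/gpio/regmap.h>
#include <linux/regmap.h>

/* GPIOs 0-3 are wired as outputs only; GPIOs 4-7 stay software controlled. */
static DECLARE_BITMAP(foo_fixed_out, 8);

static int foo_gpio_register(struct device *dev, struct regmap *regmap)
{
	struct gpio_regmap_config cfg = { };

	bitmap_set(foo_fixed_out, 0, 4);
	cfg.parent = dev;
	cfg.regmap = regmap;
	cfg.ngpio = 8;
	cfg.fixed_direction_output = foo_fixed_out;

	return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &cfg));
}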
@@ -85,6 +89,7 @@ struct gpio_regmap_config { int reg_stride; int ngpio_per_reg; struct irq_domain *irq_domain; + unsigned long *fixed_direction_output; #ifdef CONFIG_REGMAP_IRQ struct regmap_irq_chip *regmap_irq_chip; diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 9ecc70baaca9..8d0e703bc929 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -434,6 +434,7 @@ enum { HCI_USER_CHANNEL, HCI_EXT_CONFIGURED, HCI_LE_ADV, + HCI_LE_ADV_0, HCI_LE_PER_ADV, HCI_LE_SCAN, HCI_SSP_ENABLED, diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 2924c2bf2a98..b8100dbfe5d7 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -244,6 +244,7 @@ struct adv_info { bool enabled; bool pending; bool periodic; + bool periodic_enabled; __u8 mesh; __u8 instance; __u8 handle; diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 4bb0eaedda18..00e182a22720 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -38,8 +38,8 @@ #define L2CAP_DEFAULT_TX_WINDOW 63 #define L2CAP_DEFAULT_EXT_WINDOW 0x3FFF #define L2CAP_DEFAULT_MAX_TX 3 -#define L2CAP_DEFAULT_RETRANS_TO 2 /* seconds */ -#define L2CAP_DEFAULT_MONITOR_TO 12 /* seconds */ +#define L2CAP_DEFAULT_RETRANS_TO 2000 /* 2 seconds */ +#define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ #define L2CAP_DEFAULT_MAX_PDU_SIZE 1492 /* Sized for AMP packet */ #define L2CAP_DEFAULT_ACK_TO 200 #define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 74edea06985b..bca0333f1e99 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -853,7 +853,7 @@ struct mgmt_cp_set_mesh { __le16 window; __le16 period; __u8 num_ad_types; - __u8 ad_types[]; + __u8 ad_types[] __counted_by(num_ad_types); } __packed; #define MGMT_SET_MESH_RECEIVER_SIZE 6 diff --git a/include/net/tcp.h b/include/net/tcp.h index 190b3714e93b..4fd6d8d1230d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -373,7 +373,7 @@ void tcp_delack_timer_handler(struct sock *sk); int tcp_ioctl(struct sock *sk, int cmd, int *karg); enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb); void tcp_rcv_established(struct sock *sk, struct sk_buff *skb); -void tcp_rcvbuf_grow(struct sock *sk); +void tcp_rcvbuf_grow(struct sock *sk, u32 newval); void tcp_rcv_space_adjust(struct sock *sk); int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); void tcp_twsk_destructor(struct sock *sk); diff --git a/include/net/tls.h b/include/net/tls.h index f2af113728aa..ebd2550280ae 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -454,25 +454,26 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq) /* Log all TLS record header TCP sequences in [seq, seq+len] */ static inline void -tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len) +tls_offload_rx_resync_async_request_start(struct tls_offload_resync_async *resync_async, + __be32 seq, u16 len) { - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); - - atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) | + atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC); - rx_ctx->resync_async->loglen = 0; - rx_ctx->resync_async->rcd_delta = 0; + resync_async->loglen = 0; + resync_async->rcd_delta = 0; } 
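+/*
+ * Layout of the req word as encoded above: bits 63..32 carry the TCP
+ * sequence of the record header and the log length sits above the flag
+ * bits. _request_end() stores a fresh sequence with only RESYNC_REQ
+ * set; _request_cancel() zeroes the whole word.
+ */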
static inline void -tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq) +tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_async, + __be32 seq) { - struct tls_context *tls_ctx = tls_get_ctx(sk); - struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); + atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ); +} - atomic64_set(&rx_ctx->resync_async->req, - ((u64)ntohl(seq) << 32) | RESYNC_REQ); +static inline void +tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async) +{ + atomic64_set(&resync_async->req, 0); } static inline void diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index 9d2c36c6a0ed..6757233bd064 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -218,6 +218,9 @@ TRACE_EVENT(tcp_rcvbuf_grow, __field(__u32, space) __field(__u32, ooo_space) __field(__u32, rcvbuf) + __field(__u32, rcv_ssthresh) + __field(__u32, window_clamp) + __field(__u32, rcv_wnd) __field(__u8, scaling_ratio) __field(__u16, sport) __field(__u16, dport) @@ -245,6 +248,9 @@ TRACE_EVENT(tcp_rcvbuf_grow, tp->rcv_nxt; __entry->rcvbuf = sk->sk_rcvbuf; + __entry->rcv_ssthresh = tp->rcv_ssthresh; + __entry->window_clamp = tp->window_clamp; + __entry->rcv_wnd = tp->rcv_wnd; __entry->scaling_ratio = tp->scaling_ratio; __entry->sport = ntohs(inet->inet_sport); __entry->dport = ntohs(inet->inet_dport); @@ -264,11 +270,14 @@ TRACE_EVENT(tcp_rcvbuf_grow, ), TP_printk("time=%u rtt_us=%u copied=%u inq=%u space=%u ooo=%u scaling_ratio=%u rcvbuf=%u " + "rcv_ssthresh=%u window_clamp=%u rcv_wnd=%u " "family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 " "saddrv6=%pI6c daddrv6=%pI6c skaddr=%p sock_cookie=%llx", __entry->time, __entry->rtt_us, __entry->copied, __entry->inq, __entry->space, __entry->ooo_space, __entry->scaling_ratio, __entry->rcvbuf, + __entry->rcv_ssthresh, __entry->window_clamp, + __entry->rcv_wnd, show_family_name(__entry->family), __entry->sport, __entry->dport, __entry->saddr, __entry->daddr, diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index 40ff19f52a8d..517489a7ec60 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -1013,6 +1013,20 @@ struct drm_xe_vm_destroy { * valid on VMs with DRM_XE_VM_CREATE_FLAG_FAULT_MODE set. The CPU address * mirror flag are only valid for DRM_XE_VM_BIND_OP_MAP operations, the BO * handle MBZ, and the BO offset MBZ. + * - %DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET - Can be used in combination with + * %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR to reset madvises when the underlying + * CPU address space range is unmapped (typically with munmap(2) or brk(2)). + * The madvise values set with &DRM_IOCTL_XE_MADVISE are reset to the values + * that were present immediately after the &DRM_IOCTL_XE_VM_BIND. + * The reset GPU virtual address range is the intersection of the range bound + * using &DRM_IOCTL_XE_VM_BIND and the virtual CPU address space range + * unmapped. + * This functionality is present to mimic the behaviour of CPU address space + * madvises set using madvise(2), which are typically reset on unmap. + * Note: free(3) may or may not call munmap(2) and/or brk(2), and may thus + * not invoke autoreset. Neither will stack variables going out of scope. + * Therefore it's recommended to always explicitly reset the madvises when + * freeing the memory backing a region used in a &DRM_IOCTL_XE_MADVISE call. 
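 * This flag is intended to be used together with
 * %DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR as part of a
 * %DRM_XE_VM_BIND_OP_MAP operation.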
* * The @prefetch_mem_region_instance for %DRM_XE_VM_BIND_OP_PREFETCH can also be: * - %DRM_XE_CONSULT_MEM_ADVISE_PREF_LOC, which ensures prefetching occurs in @@ -1119,6 +1133,7 @@ struct drm_xe_vm_bind_op { #define DRM_XE_VM_BIND_FLAG_DUMPABLE (1 << 3) #define DRM_XE_VM_BIND_FLAG_CHECK_PXP (1 << 4) #define DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR (1 << 5) +#define DRM_XE_VM_BIND_FLAG_MADVISE_AUTORESET (1 << 6) /** @flags: Bind flags */ __u32 flags; diff --git a/io_uring/fdinfo.c b/io_uring/fdinfo.c index ff3364531c77..294c75a8a3bd 100644 --- a/io_uring/fdinfo.c +++ b/io_uring/fdinfo.c @@ -59,7 +59,6 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) { struct io_overflow_cqe *ocqe; struct io_rings *r = ctx->rings; - struct rusage sq_usage; unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1; unsigned int sq_head = READ_ONCE(r->sq.head); unsigned int sq_tail = READ_ONCE(r->sq.tail); @@ -152,14 +151,15 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) * thread termination. */ if (tsk) { + u64 usec; + get_task_struct(tsk); rcu_read_unlock(); - getrusage(tsk, RUSAGE_SELF, &sq_usage); + usec = io_sq_cpu_usec(tsk); put_task_struct(tsk); sq_pid = sq->task_pid; sq_cpu = sq->sq_cpu; - sq_total_time = (sq_usage.ru_stime.tv_sec * 1000000 - + sq_usage.ru_stime.tv_usec); + sq_total_time = usec; sq_work_time = sq->work_time; } else { rcu_read_unlock(); diff --git a/io_uring/filetable.c b/io_uring/filetable.c index a21660e3145a..794ef95df293 100644 --- a/io_uring/filetable.c +++ b/io_uring/filetable.c @@ -57,7 +57,7 @@ void io_free_file_tables(struct io_ring_ctx *ctx, struct io_file_table *table) static int io_install_fixed_file(struct io_ring_ctx *ctx, struct file *file, u32 slot_index) - __must_hold(&req->ctx->uring_lock) + __must_hold(&ctx->uring_lock) { struct io_rsrc_node *node; diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 820ef0527666..296667ba712c 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -879,7 +879,7 @@ static inline struct io_cqe io_init_cqe(u64 user_data, s32 res, u32 cflags) } static __cold void io_cqe_overflow(struct io_ring_ctx *ctx, struct io_cqe *cqe, - struct io_big_cqe *big_cqe) + struct io_big_cqe *big_cqe) { struct io_overflow_cqe *ocqe; diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index aad655e38672..a727e020fe03 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -155,6 +155,27 @@ static int io_provided_buffers_select(struct io_kiocb *req, size_t *len, return 1; } +static bool io_should_commit(struct io_kiocb *req, unsigned int issue_flags) +{ + /* + * If we came in unlocked, we have no choice but to consume the + * buffer here, otherwise nothing ensures that the buffer won't + * get used by others. This does mean it'll be pinned until the + * IO completes, coming in unlocked means we're being called from + * io-wq context and there may be further retries in async hybrid + * mode. For the locked case, the caller must call commit when + * the transfer completes (or if we get -EAGAIN and must poll of + * retry). 
+ */ + if (issue_flags & IO_URING_F_UNLOCKED) + return true; + + /* uring_cmd commits kbuf upfront, no need to auto-commit */ + if (!io_file_can_poll(req) && req->opcode != IORING_OP_URING_CMD) + return true; + return false; +} + static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len, struct io_buffer_list *bl, unsigned int issue_flags) @@ -181,17 +202,7 @@ static struct io_br_sel io_ring_buffer_select(struct io_kiocb *req, size_t *len, sel.buf_list = bl; sel.addr = u64_to_user_ptr(buf->addr); - if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) { - /* - * If we came in unlocked, we have no choice but to consume the - * buffer here, otherwise nothing ensures that the buffer won't - * get used by others. This does mean it'll be pinned until the - * IO completes, coming in unlocked means we're being called from - * io-wq context and there may be further retries in async hybrid - * mode. For the locked case, the caller must call commit when - * the transfer completes (or if we get -EAGAIN and must poll of - * retry). - */ + if (io_should_commit(req, issue_flags)) { io_kbuf_commit(req, sel.buf_list, *len, 1); sel.buf_list = NULL; } diff --git a/io_uring/net.c b/io_uring/net.c index f99b90c762fc..a95cc9ca2a4d 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -383,7 +383,7 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; if (sr->flags & IORING_SEND_VECTORIZED) - return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE); + return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE); return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter); } diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index a3f11349ce06..e22f072c7d5f 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -169,7 +170,38 @@ static inline bool io_sqd_events_pending(struct io_sq_data *sqd) return READ_ONCE(sqd->state); } -static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) +struct io_sq_time { + bool started; + u64 usec; +}; + +u64 io_sq_cpu_usec(struct task_struct *tsk) +{ + u64 utime, stime; + + task_cputime_adjusted(tsk, &utime, &stime); + do_div(stime, 1000); + return stime; +} + +static void io_sq_update_worktime(struct io_sq_data *sqd, struct io_sq_time *ist) +{ + if (!ist->started) + return; + ist->started = false; + sqd->work_time += io_sq_cpu_usec(current) - ist->usec; +} + +static void io_sq_start_worktime(struct io_sq_time *ist) +{ + if (ist->started) + return; + ist->started = true; + ist->usec = io_sq_cpu_usec(current); +} + +static int __io_sq_thread(struct io_ring_ctx *ctx, struct io_sq_data *sqd, + bool cap_entries, struct io_sq_time *ist) { unsigned int to_submit; int ret = 0; @@ -182,6 +214,8 @@ static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries) if (to_submit || !wq_list_empty(&ctx->iopoll_list)) { const struct cred *creds = NULL; + io_sq_start_worktime(ist); + if (ctx->sq_creds != current_cred()) creds = override_creds(ctx->sq_creds); @@ -255,23 +289,11 @@ static bool io_sq_tw_pending(struct llist_node *retry_list) return retry_list || !llist_empty(&tctx->task_list); } -static void io_sq_update_worktime(struct io_sq_data *sqd, struct rusage *start) -{ - struct rusage end; - - getrusage(current, RUSAGE_SELF, &end); - end.ru_stime.tv_sec -= start->ru_stime.tv_sec; - end.ru_stime.tv_usec -= start->ru_stime.tv_usec; - - sqd->work_time += end.ru_stime.tv_usec + end.ru_stime.tv_sec * 1000000; 
-} - static int io_sq_thread(void *data) { struct llist_node *retry_list = NULL; struct io_sq_data *sqd = data; struct io_ring_ctx *ctx; - struct rusage start; unsigned long timeout = 0; char buf[TASK_COMM_LEN] = {}; DEFINE_WAIT(wait); @@ -309,6 +331,7 @@ static int io_sq_thread(void *data) mutex_lock(&sqd->lock); while (1) { bool cap_entries, sqt_spin = false; + struct io_sq_time ist = { }; if (io_sqd_events_pending(sqd) || signal_pending(current)) { if (io_sqd_handle_event(sqd)) @@ -317,9 +340,8 @@ static int io_sq_thread(void *data) } cap_entries = !list_is_singular(&sqd->ctx_list); - getrusage(current, RUSAGE_SELF, &start); list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { - int ret = __io_sq_thread(ctx, cap_entries); + int ret = __io_sq_thread(ctx, sqd, cap_entries, &ist); if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list))) sqt_spin = true; @@ -327,15 +349,18 @@ static int io_sq_thread(void *data) if (io_sq_tw(&retry_list, IORING_TW_CAP_ENTRIES_VALUE)) sqt_spin = true; - list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) - if (io_napi(ctx)) + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { + if (io_napi(ctx)) { + io_sq_start_worktime(&ist); io_napi_sqpoll_busy_poll(ctx); + } + } + + io_sq_update_worktime(sqd, &ist); if (sqt_spin || !time_after(jiffies, timeout)) { - if (sqt_spin) { - io_sq_update_worktime(sqd, &start); + if (sqt_spin) timeout = jiffies + sqd->sq_thread_idle; - } if (unlikely(need_resched())) { mutex_unlock(&sqd->lock); cond_resched(); diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h index b83dcdec9765..fd2f6f29b516 100644 --- a/io_uring/sqpoll.h +++ b/io_uring/sqpoll.h @@ -29,6 +29,7 @@ void io_sq_thread_unpark(struct io_sq_data *sqd); void io_put_sq_data(struct io_sq_data *sqd); void io_sqpoll_wait_sq(struct io_ring_ctx *ctx); int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask); +u64 io_sq_cpu_usec(struct task_struct *tsk); static inline struct task_struct *sqpoll_task_locked(struct io_sq_data *sqd) { diff --git a/io_uring/waitid.c b/io_uring/waitid.c index f25110fb1b12..53532ae6256c 100644 --- a/io_uring/waitid.c +++ b/io_uring/waitid.c @@ -250,7 +250,7 @@ int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -EINVAL; iwa = io_uring_alloc_async_data(NULL, req); - if (!unlikely(iwa)) + if (unlikely(!iwa)) return -ENOMEM; iwa->req = req; diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 3ffa0d80ddd1..d1917b28761a 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -1030,7 +1030,7 @@ __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, void __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, const char *name) { - scoped_irqdesc_get_and_lock(irq, 0) + scoped_irqdesc_get_and_buslock(irq, 0) __irq_do_set_handler(scoped_irqdesc, handle, is_chained, name); } EXPORT_SYMBOL_GPL(__irq_set_handler); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index c94837382037..400856abf672 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -659,7 +659,7 @@ void __disable_irq(struct irq_desc *desc) static int __disable_irq_nosync(unsigned int irq) { - scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { __disable_irq(scoped_irqdesc); return 0; } @@ -789,7 +789,7 @@ void __enable_irq(struct irq_desc *desc) */ void enable_irq(unsigned int irq) { - scoped_irqdesc_get_and_lock(irq, IRQ_GET_DESC_CHECK_GLOBAL) { + scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) 
{ struct irq_desc *desc = scoped_irqdesc; if (WARN(!desc->irq_data.chip, "enable_irq before setup/request_irq: irq %u\n", irq)) diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index 2b0e88206d07..ecb251e883ea 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -67,8 +67,19 @@ static unsigned long scx_watchdog_timestamp = INITIAL_JIFFIES; static struct delayed_work scx_watchdog_work; -/* for %SCX_KICK_WAIT */ -static unsigned long __percpu *scx_kick_cpus_pnt_seqs; +/* + * For %SCX_KICK_WAIT: Each CPU has a pointer to an array of pick_task sequence + * numbers. The arrays are allocated with kvzalloc() as size can exceed percpu + * allocator limits on large machines. O(nr_cpu_ids^2) allocation, allocated + * lazily when enabling and freed when disabling to avoid waste when sched_ext + * isn't active. + */ +struct scx_kick_pseqs { + struct rcu_head rcu; + unsigned long seqs[]; +}; + +static DEFINE_PER_CPU(struct scx_kick_pseqs __rcu *, scx_kick_pseqs); /* * Direct dispatch marker. @@ -780,13 +791,23 @@ static void schedule_deferred(struct rq *rq) if (rq->scx.flags & SCX_RQ_IN_WAKEUP) return; + /* Don't do anything if there already is a deferred operation. */ + if (rq->scx.flags & SCX_RQ_BAL_CB_PENDING) + return; + /* * If in balance, the balance callbacks will be called before rq lock is * released. Schedule one. + * + * We can't directly insert the callback into the + * rq's list: The call can drop its lock and make the pending balance + * callback visible to unrelated code paths that call rq_pin_lock(). + * + * Just let balance_one() know that it must do it itself. */ if (rq->scx.flags & SCX_RQ_IN_BALANCE) { - queue_balance_callback(rq, &rq->scx.deferred_bal_cb, - deferred_bal_cb_workfn); + rq->scx.flags |= SCX_RQ_BAL_CB_PENDING; return; } @@ -2003,6 +2024,19 @@ static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq) dspc->cursor = 0; } +static inline void maybe_queue_balance_callback(struct rq *rq) +{ + lockdep_assert_rq_held(rq); + + if (!(rq->scx.flags & SCX_RQ_BAL_CB_PENDING)) + return; + + queue_balance_callback(rq, &rq->scx.deferred_bal_cb, + deferred_bal_cb_workfn); + + rq->scx.flags &= ~SCX_RQ_BAL_CB_PENDING; +} + static int balance_one(struct rq *rq, struct task_struct *prev) { struct scx_sched *sch = scx_root; @@ -2150,6 +2184,8 @@ static int balance_scx(struct rq *rq, struct task_struct *prev, #endif rq_repin_lock(rq, rf); + maybe_queue_balance_callback(rq); + return ret; } @@ -3471,7 +3507,9 @@ static void scx_sched_free_rcu_work(struct work_struct *work) struct scx_dispatch_q *dsq; int node; + irq_work_sync(&sch->error_irq_work); kthread_stop(sch->helper->task); + free_percpu(sch->pcpu); for_each_node_state(node, N_POSSIBLE) @@ -3850,6 +3888,27 @@ static const char *scx_exit_reason(enum scx_exit_kind kind) } } +static void free_kick_pseqs_rcu(struct rcu_head *rcu) +{ + struct scx_kick_pseqs *pseqs = container_of(rcu, struct scx_kick_pseqs, rcu); + + kvfree(pseqs); +} + +static void free_kick_pseqs(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu); + struct scx_kick_pseqs *to_free; + + to_free = rcu_replace_pointer(*pseqs, NULL, true); + if (to_free) + call_rcu(&to_free->rcu, free_kick_pseqs_rcu); + } +} + static void scx_disable_workfn(struct kthread_work *work) { struct scx_sched *sch = container_of(work, struct scx_sched, disable_work); @@ -3986,6 +4045,7 @@ static void scx_disable_workfn(struct kthread_work *work) free_percpu(scx_dsp_ctx); scx_dsp_ctx = NULL; scx_dsp_max_batch
+ free_kick_pseqs(); mutex_unlock(&scx_enable_mutex); @@ -4348,6 +4408,33 @@ static void scx_vexit(struct scx_sched *sch, irq_work_queue(&sch->error_irq_work); } +static int alloc_kick_pseqs(void) +{ + int cpu; + + /* + * Allocate per-CPU arrays sized by nr_cpu_ids. Use kvzalloc as size + * can exceed percpu allocator limits on large machines. + */ + for_each_possible_cpu(cpu) { + struct scx_kick_pseqs **pseqs = per_cpu_ptr(&scx_kick_pseqs, cpu); + struct scx_kick_pseqs *new_pseqs; + + WARN_ON_ONCE(rcu_access_pointer(*pseqs)); + + new_pseqs = kvzalloc_node(struct_size(new_pseqs, seqs, nr_cpu_ids), + GFP_KERNEL, cpu_to_node(cpu)); + if (!new_pseqs) { + free_kick_pseqs(); + return -ENOMEM; + } + + rcu_assign_pointer(*pseqs, new_pseqs); + } + + return 0; +} + static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops) { struct scx_sched *sch; @@ -4495,10 +4582,14 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) goto err_unlock; } + ret = alloc_kick_pseqs(); + if (ret) + goto err_unlock; + sch = scx_alloc_and_add_sched(ops); if (IS_ERR(sch)) { ret = PTR_ERR(sch); - goto err_unlock; + goto err_free_pseqs; } /* @@ -4701,6 +4792,8 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link) return 0; +err_free_pseqs: + free_kick_pseqs(); err_unlock: mutex_unlock(&scx_enable_mutex); return ret; @@ -5082,10 +5175,18 @@ static void kick_cpus_irq_workfn(struct irq_work *irq_work) { struct rq *this_rq = this_rq(); struct scx_rq *this_scx = &this_rq->scx; - unsigned long *pseqs = this_cpu_ptr(scx_kick_cpus_pnt_seqs); + struct scx_kick_pseqs __rcu *pseqs_pcpu = __this_cpu_read(scx_kick_pseqs); bool should_wait = false; + unsigned long *pseqs; s32 cpu; + if (unlikely(!pseqs_pcpu)) { + pr_warn_once("kick_cpus_irq_workfn() called with NULL scx_kick_pseqs"); + return; + } + + pseqs = rcu_dereference_bh(pseqs_pcpu)->seqs; + for_each_cpu(cpu, this_scx->cpus_to_kick) { should_wait |= kick_one_cpu(cpu, this_rq, pseqs); cpumask_clear_cpu(cpu, this_scx->cpus_to_kick); @@ -5208,11 +5309,6 @@ void __init init_sched_ext_class(void) scx_idle_init_masks(); - scx_kick_cpus_pnt_seqs = - __alloc_percpu(sizeof(scx_kick_cpus_pnt_seqs[0]) * nr_cpu_ids, - __alignof__(scx_kick_cpus_pnt_seqs[0])); - BUG_ON(!scx_kick_cpus_pnt_seqs); - for_each_possible_cpu(cpu) { struct rq *rq = cpu_rq(cpu); int n = cpu_to_node(cpu); @@ -5688,8 +5784,8 @@ BTF_KFUNCS_START(scx_kfunc_ids_dispatch) BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots) BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel) BTF_ID_FLAGS(func, scx_bpf_dsq_move_to_local) -BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice) -BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime) +BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) +BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) BTF_KFUNCS_END(scx_kfunc_ids_dispatch) @@ -5820,8 +5916,8 @@ __bpf_kfunc_end_defs(); BTF_KFUNCS_START(scx_kfunc_ids_unlocked) BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE) -BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice) -BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime) +BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_slice, KF_RCU) +BTF_ID_FLAGS(func, scx_bpf_dsq_move_set_vtime, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_dsq_move, KF_RCU) BTF_ID_FLAGS(func, scx_bpf_dsq_move_vtime, KF_RCU) BTF_KFUNCS_END(scx_kfunc_ids_unlocked)
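Taken together, the sched/ext.c hunks above replace a flat __alloc_percpu() array with per-CPU RCU-protected pointers to kvzalloc()'d arrays, allocated at enable time and reclaimed through a grace period at disable time. As a rough sketch of that lifecycle (condensed from the patch; shortened hypothetical names, per-CPU loops and error unwinding elided):

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct pseqs {
	struct rcu_head rcu;
	unsigned long seqs[];			/* one slot per possible CPU */
};

static DEFINE_PER_CPU(struct pseqs __rcu *, pcpu_pseqs);

/* enable path: allocate and publish one array per CPU */
static int pseqs_alloc(int cpu)
{
	struct pseqs __rcu **slot = per_cpu_ptr(&pcpu_pseqs, cpu);
	struct pseqs *new;

	/* kvzalloc: nr_cpu_ids entries can exceed percpu allocator limits */
	new = kvzalloc_node(struct_size(new, seqs, nr_cpu_ids),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!new)
		return -ENOMEM;

	rcu_assign_pointer(*slot, new);		/* publish */
	return 0;
}

static void pseqs_free_rcu(struct rcu_head *rcu)
{
	kvfree(container_of(rcu, struct pseqs, rcu));
}

/* disable path: unpublish, then free only after a grace period */
static void pseqs_free(int cpu)
{
	struct pseqs *old;

	old = rcu_replace_pointer(*per_cpu_ptr(&pcpu_pseqs, cpu), NULL, true);
	if (old)
		call_rcu(&old->rcu, pseqs_free_rcu);
}

kvzalloc() sidesteps the percpu allocator's unit-size limit, and the rcu_replace_pointer()/call_rcu() pairing is what lets kick_cpus_irq_workfn(), which dereferences the pointer under RCU, never touch freed memory.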
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index cee1793e8277..25970dbbb279 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6437,6 +6437,16 @@ static void sync_throttle(struct task_group *tg, int cpu) cfs_rq->throttle_count = pcfs_rq->throttle_count; cfs_rq->throttled_clock_pelt = rq_clock_pelt(cpu_rq(cpu)); + + /* + * It is not enough to sync the "pelt_clock_throttled" indicator + * with the parent cfs_rq when the hierarchy is not queued. + * Always join a throttled hierarchy with PELT clock throttled + * and leave it to the first enqueue, or distribution, to + * unthrottle the PELT clock. + */ + if (cfs_rq->throttle_count) + cfs_rq->pelt_clock_throttled = 1; } /* conditionally throttle active cfs_rq's from put_prev_entity() */ @@ -13187,6 +13197,8 @@ static void propagate_entity_cfs_rq(struct sched_entity *se) if (!cfs_rq_pelt_clock_throttled(cfs_rq)) list_add_leaf_cfs_rq(cfs_rq); } + + assert_list_leaf_cfs_rq(rq_of(cfs_rq)); } #else /* !CONFIG_FAIR_GROUP_SCHED: */ static void propagate_entity_cfs_rq(struct sched_entity *se) { } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 1f5d07067f60..adfb6e3409d7 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -784,6 +784,7 @@ enum scx_rq_flags { SCX_RQ_BAL_KEEP = 1 << 3, /* balance decided to keep current */ SCX_RQ_BYPASSING = 1 << 4, SCX_RQ_CLK_VALID = 1 << 5, /* RQ clock is fresh and valid */ + SCX_RQ_BAL_CB_PENDING = 1 << 6, /* must queue a cb after dispatching */ SCX_RQ_IN_WAKEUP = 1 << 16, SCX_RQ_IN_BALANCE = 1 << 17, @@ -3740,11 +3741,9 @@ static inline int mm_cid_get(struct rq *rq, struct task_struct *t, struct mm_struct *mm) { struct mm_cid __percpu *pcpu_cid = mm->pcpu_cid; - struct cpumask *cpumask; int cid; lockdep_assert_rq_held(rq); - cpumask = mm_cidmask(mm); cid = __this_cpu_read(pcpu_cid->cid); if (mm_cid_is_valid(cid)) { mm_cid_snapshot_time(rq, mm); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index b6974fce800c..3a4d3b2e3f74 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -3070,7 +3070,7 @@ static int __init tk_aux_sysfs_init(void) return -ENOMEM; } - for (int i = 0; i <= MAX_AUX_CLOCKS; i++) { + for (int i = 0; i < MAX_AUX_CLOCKS; i++) { char id[2] = { [0] = '0' + i, }; struct kobject *clk = kobject_create_and_add(id, auxo); diff --git a/kernel/trace/rv/monitors/pagefault/Kconfig b/kernel/trace/rv/monitors/pagefault/Kconfig index 5e16625f1653..0e013f00c33b 100644 --- a/kernel/trace/rv/monitors/pagefault/Kconfig +++ b/kernel/trace/rv/monitors/pagefault/Kconfig @@ -5,6 +5,7 @@ config RV_MON_PAGEFAULT select RV_LTL_MONITOR depends on RV_MON_RTAPP depends on X86 || RISCV + depends on MMU default y select LTL_MON_EVENTS_ID bool "pagefault monitor" diff --git a/kernel/trace/rv/rv.c b/kernel/trace/rv/rv.c index 48338520376f..43e9ea473cda 100644 --- a/kernel/trace/rv/rv.c +++ b/kernel/trace/rv/rv.c @@ -501,7 +501,7 @@ static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos) list_for_each_entry_continue(mon, &rv_monitors_list, list) { if (mon->enabled) - return mon; + return &mon->list; } return NULL; @@ -509,7 +509,7 @@ static void *enabled_monitors_next(struct seq_file *m, void *p, loff_t *pos) static void *enabled_monitors_start(struct seq_file *m, loff_t *pos) { - struct rv_monitor *mon; + struct list_head *head; loff_t l; mutex_lock(&rv_interface_lock); @@ -517,15 +517,15 @@ static void *enabled_monitors_start(struct seq_file *m, loff_t *pos) if (list_empty(&rv_monitors_list)) return NULL; - mon = list_entry(&rv_monitors_list, struct rv_monitor, list); + head = &rv_monitors_list; for (l = 0; l <= *pos; ) { - mon = enabled_monitors_next(m, mon, &l); - if (!mon) + head = enabled_monitors_next(m, head, &l); + if (!head) break; } - return mon; + return head; } /*
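The rv.c change above is really about the seq_file iterator contract: ->start() and ->next() must traffic in the same kind of cookie, and the old code applied list_entry() to the bare list sentinel, conjuring a struct rv_monitor out of memory that isn't one. A minimal sketch of the corrected shape (hypothetical names, enabled-filtering elided; only ->show() converts the cookie to the containing object, so the sentinel is never dereferenced as an entry):

#include <linux/list.h>
#include <linux/seq_file.h>

struct monitor {
	const char *name;
	struct list_head list;
};

static LIST_HEAD(monitor_list);

/* cookie in, cookie out: always a list_head, possibly the sentinel */
static void *mon_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct list_head *cur = p;

	(*pos)++;
	return cur->next == &monitor_list ? NULL : cur->next;
}

static void *mon_start(struct seq_file *m, loff_t *pos)
{
	struct list_head *head = &monitor_list;	/* sentinel as cursor */
	loff_t l;

	for (l = 0; l <= *pos; ) {
		head = mon_next(m, head, &l);
		if (!head)
			break;
	}
	return head;
}

/* only here is the cookie converted to the containing object */
static int mon_show(struct seq_file *m, void *p)
{
	seq_printf(m, "%s\n", list_entry(p, struct monitor, list)->name);
	return 0;
}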
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index eea17e36a22b..8886055e938f 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -97,7 +97,7 @@ config CRYPTO_LIB_POLY1305 config CRYPTO_LIB_POLY1305_ARCH bool - depends on CRYPTO_LIB_POLY1305 && !UML + depends on CRYPTO_LIB_POLY1305 && !UML && !KMSAN default y if ARM default y if ARM64 && KERNEL_MODE_NEON default y if MIPS diff --git a/mm/slub.c b/mm/slub.c index a8fcc7e6f25a..d4367f25b20d 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2052,9 +2052,9 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) } } -static inline void mark_failed_objexts_alloc(struct slab *slab) +static inline bool mark_failed_objexts_alloc(struct slab *slab) { - slab->obj_exts = OBJEXTS_ALLOC_FAIL; + return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0; } static inline void handle_failed_objexts_alloc(unsigned long obj_exts, @@ -2076,7 +2076,7 @@ static inline void handle_failed_objexts_alloc(unsigned long obj_exts, #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} -static inline void mark_failed_objexts_alloc(struct slab *slab) {} +static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; } static inline void handle_failed_objexts_alloc(unsigned long obj_exts, struct slabobj_ext *vec, unsigned int objects) {} @@ -2124,8 +2124,14 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, slab_nid(slab)); } if (!vec) { - /* Mark vectors which failed to allocate */ - mark_failed_objexts_alloc(slab); + /* + * Try to mark vectors which failed to allocate. + * If this operation fails, there may be a racing process + * that has already completed the allocation. + */ + if (!mark_failed_objexts_alloc(slab) && + slab_obj_exts(slab)) + return 0; return -ENOMEM; } @@ -2136,6 +2142,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, #ifdef CONFIG_MEMCG new_exts |= MEMCG_DATA_OBJEXTS; #endif +retry: old_exts = READ_ONCE(slab->obj_exts); handle_failed_objexts_alloc(old_exts, vec, objects); if (new_slab) { @@ -2145,8 +2152,7 @@ int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, * be simply assigned. */ slab->obj_exts = new_exts; - } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) || - cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { + } else if (old_exts & ~OBJEXTS_FLAGS_MASK) { /* * If the slab is already in use, somebody can allocate and * assign slabobj_exts in parallel. In this case the existing * objcg vector should be reused. */ mark_objexts_empty(vec); if (allow_spin) kfree_nolock(vec); else kfree(vec); return 0; + } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { + /* Retry if a racing thread changed slab->obj_exts from under us. */ + goto retry; } if (allow_spin) @@ -3419,7 +3428,6 @@ static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab, if (!allow_spin && !spin_trylock_irqsave(&n->list_lock, flags)) { /* Unlucky, discard newly allocated slab */ - slab->frozen = 1; defer_deactivate_slab(slab, NULL); return NULL; } @@ -6468,9 +6476,12 @@ static void free_deferred_objects(struct irq_work *work) struct slab *slab = container_of(pos, struct slab, llnode); #ifdef CONFIG_SLUB_TINY - discard_slab(slab->slab_cache, slab); + free_slab(slab->slab_cache, slab); #else - deactivate_slab(slab->slab_cache, slab, slab->flush_freelist); + if (slab->frozen) + deactivate_slab(slab->slab_cache, slab, slab->flush_freelist); + else + free_slab(slab->slab_cache, slab); #endif } }
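Two lock-free idioms carry the slub hunk above. mark_failed_objexts_alloc() now uses cmpxchg(ptr, 0, MARK) == 0 so that exactly one of several racing failures claims the marker, and the assignment path becomes a read/cmpxchg/retry loop so a concurrent update is never silently overwritten. Stripped of slub's types (generic sketch; OBJ_FAIL_MARK and FLAG_MASK are made-up stand-ins):

#include <linux/atomic.h>
#include <linux/types.h>

#define OBJ_FAIL_MARK	0x1UL
#define FLAG_MASK	0x3UL	/* low bits reserved for flags */

static unsigned long published;	/* 0 == nothing published yet */

/* exactly one of N racing failures observes the 0 -> MARK transition */
static bool mark_failed(void)
{
	return cmpxchg(&published, 0, OBJ_FAIL_MARK) == 0;
}

/* publish a new value without losing flag bits set concurrently */
static void publish(unsigned long newval)
{
	unsigned long old;

retry:
	old = READ_ONCE(published);
	if (cmpxchg(&published, old, newval | (old & FLAG_MASK)) != old)
		goto retry;	/* lost a race: recompute against the fresh value */
}

cmpxchg() returning the previously observed value is what lets both idioms distinguish "I won the race" from "someone beat me to it".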
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index c84420cb410d..a662408ad867 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -763,11 +763,16 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb) bat_priv = netdev_priv(mesh_iface); primary_if = batadv_primary_if_get_selected(bat_priv); - if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { + if (!primary_if) { ret = -ENOENT; goto out_put_mesh_iface; } + if (primary_if->if_status != BATADV_IF_ACTIVE) { + ret = -ENOENT; + goto out_put_primary_if; + } + hard_iface = batadv_netlink_get_hardif(bat_priv, cb); if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) { ret = PTR_ERR(hard_iface); @@ -1327,11 +1332,16 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb) bat_priv = netdev_priv(mesh_iface); primary_if = batadv_primary_if_get_selected(bat_priv); - if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) { + if (!primary_if) { ret = -ENOENT; goto out_put_mesh_iface; } + if (primary_if->if_status != BATADV_IF_ACTIVE) { + ret = -ENOENT; + goto out_put_primary_if; + } + hard_iface = batadv_netlink_get_hardif(bat_priv, cb); if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) { ret = PTR_ERR(hard_iface); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 111f0e37b672..c5dedf39a129 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -843,6 +843,13 @@ static void bis_cleanup(struct hci_conn *conn) if (bis) return; + bis = hci_conn_hash_lookup_big_state(hdev, + conn->iso_qos.bcast.big, + BT_OPEN, + HCI_ROLE_MASTER); + if (bis) + return; + hci_le_terminate_big(hdev, conn); } else { hci_le_big_terminate(hdev, conn->iso_qos.bcast.big, diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index d790b0d4eb9a..d37db364acf7 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1607,8 +1607,10 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, hci_dev_set_flag(hdev, HCI_LE_ADV); - if (adv && !adv->periodic) + if (adv) adv->enabled = true; + else if (!set->handle) + hci_dev_set_flag(hdev, HCI_LE_ADV_0); conn = hci_lookup_le_connect(hdev); if (conn) @@ -1619,6 +1621,8 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, if (cp->num_of_sets) { if (adv) adv->enabled = false; + else if (!set->handle) + hci_dev_clear_flag(hdev, HCI_LE_ADV_0); /* If just one instance was disabled check if there are * any other instance enabled before clearing HCI_LE_ADV @@ -3959,8 +3963,11 @@ static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, hci_dev_set_flag(hdev, HCI_LE_PER_ADV); if (adv) - adv->enabled = true; + adv->periodic_enabled = true; } else { 
+ if (adv) + adv->periodic_enabled = false; + /* If just one instance was disabled check if there are * any other instance enabled before clearing HCI_LE_PER_ADV. * The current periodic adv instance will be marked as diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index eefdb6134ca5..73fc41b68b68 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -863,11 +863,17 @@ bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev, { struct hci_cmd_sync_work_entry *entry; - entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy); - if (!entry) - return false; + mutex_lock(&hdev->cmd_sync_work_lock); - hci_cmd_sync_cancel_entry(hdev, entry); + entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy); + if (!entry) { + mutex_unlock(&hdev->cmd_sync_work_lock); + return false; + } + + _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED); + + mutex_unlock(&hdev->cmd_sync_work_lock); return true; } @@ -1601,7 +1607,7 @@ int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance) /* If periodic advertising already disabled there is nothing to do. */ adv = hci_find_adv_instance(hdev, instance); - if (!adv || !adv->periodic || !adv->enabled) + if (!adv || !adv->periodic_enabled) return 0; memset(&cp, 0, sizeof(cp)); @@ -1666,7 +1672,7 @@ static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance) /* If periodic advertising already enabled there is nothing to do. */ adv = hci_find_adv_instance(hdev, instance); - if (adv && adv->periodic && adv->enabled) + if (adv && adv->periodic_enabled) return 0; memset(&cp, 0, sizeof(cp)); @@ -2600,9 +2606,8 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev) /* If current advertising instance is set to instance 0x00 * then we need to re-enable it. */ - if (!hdev->cur_adv_instance) - err = hci_enable_ext_advertising_sync(hdev, - hdev->cur_adv_instance); + if (hci_dev_test_and_clear_flag(hdev, HCI_LE_ADV_0)) + err = hci_enable_ext_advertising_sync(hdev, 0x00); } else { /* Schedule for most recent instance to be restarted and begin * the software rotation loop diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 9b263d061e05..3d98cb6291da 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -2032,7 +2032,7 @@ static void iso_conn_ready(struct iso_conn *conn) */ if (!bacmp(&hcon->dst, BDADDR_ANY)) { bacpy(&hcon->dst, &iso_pi(parent)->dst); - hcon->dst_type = iso_pi(parent)->dst_type; + hcon->dst_type = le_addr_type(iso_pi(parent)->dst_type); } if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) { @@ -2046,7 +2046,13 @@ static void iso_conn_ready(struct iso_conn *conn) } bacpy(&iso_pi(sk)->dst, &hcon->dst); - iso_pi(sk)->dst_type = hcon->dst_type; + + /* Convert from HCI to three-value type */ + if (hcon->dst_type == ADDR_LE_DEV_PUBLIC) + iso_pi(sk)->dst_type = BDADDR_LE_PUBLIC; + else + iso_pi(sk)->dst_type = BDADDR_LE_RANDOM; + iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle; memcpy(iso_pi(sk)->base, iso_pi(parent)->base, iso_pi(parent)->base_len); iso_pi(sk)->base_len = iso_pi(parent)->base_len; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 805c752ac0a9..d08320380ad6 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -282,7 +282,7 @@ static void __set_retrans_timer(struct l2cap_chan *chan) if (!delayed_work_pending(&chan->monitor_timer) && chan->retrans_timeout) { l2cap_set_timer(chan, &chan->retrans_timer, - secs_to_jiffies(chan->retrans_timeout)); + msecs_to_jiffies(chan->retrans_timeout)); } } @@ -291,7 +291,7 @@ 
static void __set_monitor_timer(struct l2cap_chan *chan) __clear_retrans_timer(chan); if (chan->monitor_timeout) { l2cap_set_timer(chan, &chan->monitor_timer, - secs_to_jiffies(chan->monitor_timeout)); + msecs_to_jiffies(chan->monitor_timeout)); } } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index a3d16eece0d2..24e335e3a727 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2175,19 +2175,24 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err) sk = cmd->sk; if (status) { + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + status); mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true, cmd_status_rsp, &status); - return; + goto done; } - mgmt_pending_remove(cmd); mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0); + +done: + mgmt_pending_free(cmd); } static int set_mesh_sync(struct hci_dev *hdev, void *data) { struct mgmt_pending_cmd *cmd = data; - struct mgmt_cp_set_mesh cp; + DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types, + sizeof(hdev->mesh_ad_types)); size_t len; mutex_lock(&hdev->mgmt_pending_lock); @@ -2197,27 +2202,26 @@ static int set_mesh_sync(struct hci_dev *hdev, void *data) return -ECANCELED; } - memcpy(&cp, cmd->param, sizeof(cp)); + len = cmd->param_len; + memcpy(cp, cmd->param, min(__struct_size(cp), len)); mutex_unlock(&hdev->mgmt_pending_lock); - len = cmd->param_len; - memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types)); - if (cp.enable) + if (cp->enable) hci_dev_set_flag(hdev, HCI_MESH); else hci_dev_clear_flag(hdev, HCI_MESH); - hdev->le_scan_interval = __le16_to_cpu(cp.period); - hdev->le_scan_window = __le16_to_cpu(cp.window); + hdev->le_scan_interval = __le16_to_cpu(cp->period); + hdev->le_scan_window = __le16_to_cpu(cp->window); - len -= sizeof(cp); + len -= sizeof(struct mgmt_cp_set_mesh); /* If filters don't fit, forward all adv pkts */ if (len <= sizeof(hdev->mesh_ad_types)) - memcpy(hdev->mesh_ad_types, cp.ad_types, len); + memcpy(hdev->mesh_ad_types, cp->ad_types, len); hci_update_passive_scan_sync(hdev); return 0; diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 376ce6de84be..b783526ab588 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -643,8 +643,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) tty_port_tty_hangup(&dev->port, true); dev->modem_status = - ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) | - ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) | + ((v24_sig & RFCOMM_V24_RTC) ? TIOCM_DSR : 0) | + ((v24_sig & RFCOMM_V24_RTR) ? TIOCM_CTS : 0) | ((v24_sig & RFCOMM_V24_IC) ? TIOCM_RI : 0) | ((v24_sig & RFCOMM_V24_DV) ? 
TIOCM_CD : 0); } @@ -1055,10 +1055,14 @@ static void rfcomm_tty_hangup(struct tty_struct *tty) static int rfcomm_tty_tiocmget(struct tty_struct *tty) { struct rfcomm_dev *dev = tty->driver_data; + struct rfcomm_dlc *dlc = dev->dlc; + u8 v24_sig; BT_DBG("tty %p dev %p", tty, dev); - return dev->modem_status; + rfcomm_dlc_get_modem_status(dlc, &v24_sig); + + return (v24_sig & (TIOCM_DTR | TIOCM_RTS)) | dev->modem_status; } static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) @@ -1071,23 +1075,15 @@ static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigne rfcomm_dlc_get_modem_status(dlc, &v24_sig); - if (set & TIOCM_DSR || set & TIOCM_DTR) + if (set & TIOCM_DTR) v24_sig |= RFCOMM_V24_RTC; - if (set & TIOCM_RTS || set & TIOCM_CTS) + if (set & TIOCM_RTS) v24_sig |= RFCOMM_V24_RTR; - if (set & TIOCM_RI) - v24_sig |= RFCOMM_V24_IC; - if (set & TIOCM_CD) - v24_sig |= RFCOMM_V24_DV; - if (clear & TIOCM_DSR || clear & TIOCM_DTR) + if (clear & TIOCM_DTR) v24_sig &= ~RFCOMM_V24_RTC; - if (clear & TIOCM_RTS || clear & TIOCM_CTS) + if (clear & TIOCM_RTS) v24_sig &= ~RFCOMM_V24_RTR; - if (clear & TIOCM_RI) - v24_sig &= ~RFCOMM_V24_IC; - if (clear & TIOCM_CD) - v24_sig &= ~RFCOMM_V24_DV; rfcomm_dlc_set_modem_status(dlc, v24_sig); diff --git a/net/core/devmem.c b/net/core/devmem.c index d9de31a6cc7f..1d04754bc756 100644 --- a/net/core/devmem.c +++ b/net/core/devmem.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "devmem.h" @@ -357,7 +358,8 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk, unsigned int dmabuf_id) { struct net_devmem_dmabuf_binding *binding; - struct dst_entry *dst = __sk_dst_get(sk); + struct net_device *dst_dev; + struct dst_entry *dst; int err = 0; binding = net_devmem_lookup_dmabuf(dmabuf_id); @@ -366,16 +368,35 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk, goto out_err; } + rcu_read_lock(); + dst = __sk_dst_get(sk); + /* If dst is NULL (route expired), attempt to rebuild it. */ + if (unlikely(!dst)) { + if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) { + err = -EHOSTUNREACH; + goto out_unlock; + } + dst = __sk_dst_get(sk); + if (unlikely(!dst)) { + err = -ENODEV; + goto out_unlock; + } + } + /* The dma-addrs in this binding are only reachable to the corresponding * net_device. */ - if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) { + dst_dev = dst_dev_rcu(dst); + if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) { err = -ENODEV; - goto out_err; + goto out_unlock; } + rcu_read_unlock(); return binding; +out_unlock: + rcu_read_unlock(); out_err: if (binding) net_devmem_dmabuf_binding_put(binding); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index ff19f6e54d55..6db1d4c36a88 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -891,18 +891,27 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, } } -void tcp_rcvbuf_grow(struct sock *sk) +void tcp_rcvbuf_grow(struct sock *sk, u32 newval) { const struct net *net = sock_net(sk); struct tcp_sock *tp = tcp_sk(sk); - int rcvwin, rcvbuf, cap; + u32 rcvwin, rcvbuf, cap, oldval; + u64 grow; + + oldval = tp->rcvq_space.space; + tp->rcvq_space.space = newval; if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) || (sk->sk_userlocks & SOCK_RCVBUF_LOCK)) return; + /* DRS is always one RTT late. */ + rcvwin = newval << 1; + /* slow start: allow the sender to double its rate. 
*/ - rcvwin = tp->rcvq_space.space << 1; + grow = (u64)rcvwin * (newval - oldval); + do_div(grow, oldval); + rcvwin += grow << 1; if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt; @@ -949,9 +958,7 @@ void tcp_rcv_space_adjust(struct sock *sk) trace_tcp_rcvbuf_grow(sk, time); - tp->rcvq_space.space = copied; - - tcp_rcvbuf_grow(sk); + tcp_rcvbuf_grow(sk, copied); new_measure: tp->rcvq_space.seq = tp->copied_seq; @@ -5276,7 +5283,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) } /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */ if (sk->sk_socket) - tcp_rcvbuf_grow(sk); + tcp_rcvbuf_grow(sk, tp->rcvq_space.space); } static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index d9aca1c3c097..c52b0456039d 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -1876,6 +1876,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev, link_conf->nontransmitted = false; link_conf->ema_ap = false; link_conf->bssid_indicator = 0; + link_conf->fils_discovery.min_interval = 0; + link_conf->fils_discovery.max_interval = 0; + link_conf->unsol_bcast_probe_resp_interval = 0; __sta_info_flush(sdata, true, link_id, NULL); diff --git a/net/mac80211/key.c b/net/mac80211/key.c index b14e9cd9713f..d5da7ccea66e 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -508,11 +508,16 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata, ret = ieee80211_key_enable_hw_accel(new); } } else { - if (!new->local->wowlan) + if (!new->local->wowlan) { ret = ieee80211_key_enable_hw_accel(new); - else if (link_id < 0 || !sdata->vif.active_links || - BIT(link_id) & sdata->vif.active_links) + } else if (link_id < 0 || !sdata->vif.active_links || + BIT(link_id) & sdata->vif.active_links) { new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; + if (!(new->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC | + IEEE80211_KEY_FLAG_PUT_MIC_SPACE | + IEEE80211_KEY_FLAG_RESERVE_TAILROOM))) + decrease_tailroom_need_count(sdata, 1); + } } if (ret) diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c index 6003e47c770a..171643815076 100644 --- a/net/mptcp/mib.c +++ b/net/mptcp/mib.c @@ -85,6 +85,7 @@ static const struct snmp_mib mptcp_snmp_list[] = { SNMP_MIB_ITEM("DssFallback", MPTCP_MIB_DSSFALLBACK), SNMP_MIB_ITEM("SimultConnectFallback", MPTCP_MIB_SIMULTCONNFALLBACK), SNMP_MIB_ITEM("FallbackFailed", MPTCP_MIB_FALLBACKFAILED), + SNMP_MIB_ITEM("WinProbe", MPTCP_MIB_WINPROBE), }; /* mptcp_mib_alloc - allocate percpu mib counters diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h index 309bac6fea32..a1d3e9369fbb 100644 --- a/net/mptcp/mib.h +++ b/net/mptcp/mib.h @@ -88,6 +88,7 @@ enum linux_mptcp_mib_field { MPTCP_MIB_DSSFALLBACK, /* Bad or missing DSS */ MPTCP_MIB_SIMULTCONNFALLBACK, /* Simultaneous connect */ MPTCP_MIB_FALLBACKFAILED, /* Can't fallback due to msk status */ + MPTCP_MIB_WINPROBE, /* MPTCP-level zero window probe */ __MPTCP_MIB_MAX }; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 94a5f6dcc577..d568575cdcb5 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -194,17 +194,26 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to, * - mptcp does not maintain a msk-level window clamp * - returns true when the receive buffer is actually updated */ -static bool mptcp_rcvbuf_grow(struct sock *sk) +static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval) { struct mptcp_sock *msk 
= mptcp_sk(sk); const struct net *net = sock_net(sk); - int rcvwin, rcvbuf, cap; + u32 rcvwin, rcvbuf, cap, oldval; + u64 grow; + oldval = msk->rcvq_space.space; + msk->rcvq_space.space = newval; if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) || (sk->sk_userlocks & SOCK_RCVBUF_LOCK)) return false; - rcvwin = msk->rcvq_space.space << 1; + /* DRS is always one RTT late. */ + rcvwin = newval << 1; + + /* slow start: allow the sender to double its rate. */ + grow = (u64)rcvwin * (newval - oldval); + do_div(grow, oldval); + rcvwin += grow << 1; if (!RB_EMPTY_ROOT(&msk->out_of_order_queue)) rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq; @@ -334,7 +343,7 @@ static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb) skb_set_owner_r(skb, sk); /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */ if (sk->sk_socket) - mptcp_rcvbuf_grow(sk); + mptcp_rcvbuf_grow(sk, msk->rcvq_space.space); } static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset, @@ -998,7 +1007,7 @@ static void __mptcp_clean_una(struct sock *sk) if (WARN_ON_ONCE(!msk->recovery)) break; - WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); + msk->first_pending = mptcp_send_next(sk); } dfrag_clear(sk, dfrag); @@ -1291,7 +1300,12 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, if (copy == 0) { u64 snd_una = READ_ONCE(msk->snd_una); - if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { + /* No need for zero probe if there are any data pending + * either at the msk or ssk level; skb is the current write + * queue tail and can be empty at this point. + */ + if (snd_una != msk->snd_nxt || skb->len || + skb != tcp_send_head(ssk)) { tcp_remove_empty_skb(ssk); return 0; } @@ -1342,6 +1356,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, mpext->dsn64); if (zero_window_probe) { + MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE); mptcp_subflow_ctx(ssk)->rel_write_seq += copy; mpext->frozen = 1; if (READ_ONCE(msk->csum_enabled)) @@ -1544,7 +1559,7 @@ static int __subflow_push_pending(struct sock *sk, struct sock *ssk, mptcp_update_post_push(msk, dfrag, ret); } - WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); + msk->first_pending = mptcp_send_next(sk); if (msk->snd_burst <= 0 || !sk_stream_memory_free(ssk) || @@ -1904,7 +1919,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) get_page(dfrag->page); list_add_tail(&dfrag->list, &msk->rtx_queue); if (!msk->first_pending) - WRITE_ONCE(msk->first_pending, dfrag); + msk->first_pending = dfrag; } pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk, dfrag->data_seq, dfrag->data_len, dfrag->already_sent, @@ -1937,22 +1952,36 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied); -static int __mptcp_recvmsg_mskq(struct sock *sk, - struct msghdr *msg, - size_t len, int flags, +static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg, + size_t len, int flags, int copied_total, struct scm_timestamping_internal *tss, int *cmsg_flags) { struct mptcp_sock *msk = mptcp_sk(sk); struct sk_buff *skb, *tmp; + int total_data_len = 0; int copied = 0; skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) { - u32 offset = MPTCP_SKB_CB(skb)->offset; + u32 delta, offset = MPTCP_SKB_CB(skb)->offset; u32 data_len = skb->len - offset; - u32 count = min_t(size_t, len - copied, data_len); + u32 count; int err; + if (flags & MSG_PEEK) { + /* skip already 
peeked skbs */ + if (total_data_len + data_len <= copied_total) { + total_data_len += data_len; + continue; + } + + /* skip the already peeked data in the current skb */ + delta = copied_total - total_data_len; + offset += delta; + data_len -= delta; + } + + count = min_t(size_t, len - copied, data_len); if (!(flags & MSG_TRUNC)) { err = skb_copy_datagram_msg(skb, offset, msg, count); if (unlikely(err < 0)) { @@ -1969,16 +1998,14 @@ static int __mptcp_recvmsg_mskq(struct sock *sk, copied += count; - if (count < data_len) { - if (!(flags & MSG_PEEK)) { + if (!(flags & MSG_PEEK)) { + msk->bytes_consumed += count; + if (count < data_len) { MPTCP_SKB_CB(skb)->offset += count; MPTCP_SKB_CB(skb)->map_seq += count; - msk->bytes_consumed += count; + break; } - break; - } - if (!(flags & MSG_PEEK)) { /* avoid the indirect call, we know the destructor is sock_rfree */ skb->destructor = NULL; skb->sk = NULL; @@ -1986,7 +2013,6 @@ static int __mptcp_recvmsg_mskq(struct sock *sk, sk_mem_uncharge(sk, skb->truesize); __skb_unlink(skb, &sk->sk_receive_queue); skb_attempt_defer_free(skb); - msk->bytes_consumed += count; } if (copied >= len) @@ -2050,9 +2076,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) if (msk->rcvq_space.copied <= msk->rcvq_space.space) goto new_measure; - msk->rcvq_space.space = msk->rcvq_space.copied; - if (mptcp_rcvbuf_grow(sk)) { - + if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) { /* Make subflows follow along. If we do not do this, we * get drops at subflow level if skbs can't be moved to * the mptcp rx queue fast enough (announced rcv_win can @@ -2064,8 +2088,9 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) ssk = mptcp_subflow_tcp_sock(subflow); slow = lock_sock_fast(ssk); - tcp_sk(ssk)->rcvq_space.space = msk->rcvq_space.copied; - tcp_rcvbuf_grow(ssk); + /* subflows can be added before tcp_init_transfer() */ + if (tcp_sk(ssk)->rcvq_space.space) + tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied); unlock_sock_fast(ssk, slow); } } @@ -2184,7 +2209,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, while (copied < len) { int err, bytes_read; - bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, &tss, &cmsg_flags); + bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, + copied, &tss, &cmsg_flags); if (unlikely(bytes_read < 0)) { if (!copied) copied = bytes_read; @@ -2875,7 +2901,7 @@ static void __mptcp_clear_xmit(struct sock *sk) struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_data_frag *dtmp, *dfrag; - WRITE_ONCE(msk->first_pending, NULL); + msk->first_pending = NULL; list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) dfrag_clear(sk, dfrag); } @@ -3415,9 +3441,6 @@ void __mptcp_data_acked(struct sock *sk) void __mptcp_check_push(struct sock *sk, struct sock *ssk) { - if (!mptcp_send_head(sk)) - return; - if (!sock_owned_by_user(sk)) __mptcp_subflow_push_pending(sk, ssk, false); else diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 52f9cfa4ce95..379a88e14e8d 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -414,7 +414,7 @@ static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk) { const struct mptcp_sock *msk = mptcp_sk(sk); - return READ_ONCE(msk->first_pending); + return msk->first_pending; } static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk) diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c index 92b984fa8175..fc35a11cdca2 100644 --- a/net/netfilter/nft_connlimit.c +++ 
b/net/netfilter/nft_connlimit.c @@ -48,7 +48,7 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv, return; } - count = priv->list->count; + count = READ_ONCE(priv->list->count); if ((count > priv->limit) ^ priv->invert) { regs->verdict.code = NFT_BREAK; diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index d526e69a2a2b..6f2ae7cad731 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -22,6 +22,7 @@ #include #include #include +#include struct nft_ct_helper_obj { struct nf_conntrack_helper *helper4; @@ -379,6 +380,14 @@ static bool nft_ct_tmpl_alloc_pcpu(void) } #endif +static void __nft_ct_get_destroy(const struct nft_ctx *ctx, struct nft_ct *priv) +{ +#ifdef CONFIG_NF_CONNTRACK_LABELS + if (priv->key == NFT_CT_LABELS) + nf_connlabels_put(ctx->net); +#endif +} + static int nft_ct_get_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) @@ -413,6 +422,10 @@ static int nft_ct_get_init(const struct nft_ctx *ctx, if (tb[NFTA_CT_DIRECTION] != NULL) return -EINVAL; len = NF_CT_LABELS_MAX_SIZE; + + err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1); + if (err) + return err; break; #endif case NFT_CT_HELPER: @@ -494,7 +507,8 @@ static int nft_ct_get_init(const struct nft_ctx *ctx, case IP_CT_DIR_REPLY: break; default: - return -EINVAL; + err = -EINVAL; + goto err; } } @@ -502,11 +516,11 @@ static int nft_ct_get_init(const struct nft_ctx *ctx, err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL, NFT_DATA_VALUE, len); if (err < 0) - return err; + goto err; err = nf_ct_netns_get(ctx->net, ctx->family); if (err < 0) - return err; + goto err; if (priv->key == NFT_CT_BYTES || priv->key == NFT_CT_PKTS || @@ -514,6 +528,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx, nf_ct_set_acct(ctx->net, true); return 0; +err: + __nft_ct_get_destroy(ctx, priv); + return err; } static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv) @@ -626,6 +643,9 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, static void nft_ct_get_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) { + struct nft_ct *priv = nft_expr_priv(expr); + + __nft_ct_get_destroy(ctx, priv); nf_ct_netns_put(ctx->net, ctx->family); } @@ -1173,6 +1193,10 @@ static void nft_ct_helper_obj_eval(struct nft_object *obj, if (help) { rcu_assign_pointer(help->helper, to_assign); set_bit(IPS_HELPER_BIT, &ct->status); + + if ((ct->status & IPS_NAT_MASK) && !nfct_seqadj(ct)) + if (!nfct_seqadj_ext_add(ct)) + regs->verdict.code = NF_DROP; } } diff --git a/net/sctp/input.c b/net/sctp/input.c index 7e99894778d4..e119e460ccde 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -190,7 +190,7 @@ int sctp_rcv(struct sk_buff *skb) goto discard_release; nf_reset_ct(skb); - if (sk_filter(sk, skb)) + if (sk_filter(sk, skb) || skb->len < sizeof(struct sctp_chunkhdr)) goto discard_release; /* Create an SCTP packet structure. 
*/ diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 4d29b390aed9..82ea407e520a 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -724,8 +724,10 @@ tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async, /* shouldn't get to wraparound: * too long in async stage, something bad happened */ - if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) + if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) { + tls_offload_rx_resync_async_request_cancel(resync_async); return false; + } /* asynchronous stage: log all headers seq such that * req_seq <= seq <= end_seq, and wait for real resync request diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index ceca47cd9e25..2187e148389d 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4139,8 +4139,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) rdev->wiphy.txq_quantum = old_txq_quantum; } - if (old_rts_threshold) - kfree(old_radio_rts_threshold); + kfree(old_radio_rts_threshold); return result; } diff --git a/rust/kernel/auxiliary.rs b/rust/kernel/auxiliary.rs index e11848bbf206..7a3b0b9c418e 100644 --- a/rust/kernel/auxiliary.rs +++ b/rust/kernel/auxiliary.rs @@ -217,13 +217,7 @@ pub fn id(&self) -> u32 { /// Returns a reference to the parent [`device::Device`], if any. pub fn parent(&self) -> Option<&device::Device> { - let ptr: *const Self = self; - // CAST: `Device` types are transparent to each other. - let ptr: *const Device = ptr.cast(); - // SAFETY: `ptr` was derived from `&self`. - let this = unsafe { &*ptr }; - - this.as_ref().parent() + self.as_ref().parent() } } diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs index 1321e6f0b53c..a849b7dde2fd 100644 --- a/rust/kernel/device.rs +++ b/rust/kernel/device.rs @@ -251,7 +251,7 @@ pub(crate) fn as_raw(&self) -> *mut bindings::device { /// Returns a reference to the parent device, if any. #[cfg_attr(not(CONFIG_AUXILIARY_BUS), expect(dead_code))] - pub(crate) fn parent(&self) -> Option<&Self> { + pub(crate) fn parent(&self) -> Option<&Device> { // SAFETY: // - By the type invariant `self.as_raw()` is always valid. // - The parent device is only ever set at device creation. @@ -264,7 +264,7 @@ pub(crate) fn parent(&self) -> Option<&Self> { // - Since `parent` is not NULL, it must be a valid pointer to a `struct device`. // - `parent` is valid for the lifetime of `self`, since a `struct device` holds a // reference count of its parent. 
- Some(unsafe { Self::from_raw(parent) }) + Some(unsafe { Device::from_raw(parent) }) } } diff --git a/tools/net/ynl/lib/ynl-priv.h b/tools/net/ynl/lib/ynl-priv.h index 29481989ea76..ced7dce44efb 100644 --- a/tools/net/ynl/lib/ynl-priv.h +++ b/tools/net/ynl/lib/ynl-priv.h @@ -313,7 +313,7 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str) struct nlattr *attr; size_t len; - len = strlen(str); + len = strlen(str) + 1; if (__ynl_attr_put_overflow(nlh, len)) return; @@ -321,7 +321,7 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str) attr->nla_type = attr_type; strcpy((char *)ynl_attr_data(attr), str); - attr->nla_len = NLA_HDRLEN + NLA_ALIGN(len); + attr->nla_len = NLA_HDRLEN + len; nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len); } diff --git a/tools/net/ynl/pyynl/ethtool.py b/tools/net/ynl/pyynl/ethtool.py index 9b523cbb3568..fd0f6b8d54d1 100755 --- a/tools/net/ynl/pyynl/ethtool.py +++ b/tools/net/ynl/pyynl/ethtool.py @@ -44,6 +44,9 @@ def print_field(reply, *desc): Pretty-print a set of fields from the reply. desc specifies the fields and the optional type (bool/yn). """ + if not reply: + return + if len(desc) == 0: return print_field(reply, *zip(reply.keys(), reply.keys())) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index a5770570b106..620854fdaaf6 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -217,6 +217,7 @@ static bool is_rust_noreturn(const struct symbol *func) * these come from the Rust standard library). */ return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") || + str_ends_with(func->name, "_4core6option13expect_failed") || str_ends_with(func->name, "_4core6option13unwrap_failed") || str_ends_with(func->name, "_4core6result13unwrap_failed") || str_ends_with(func->name, "_4core9panicking5panic") || @@ -4710,8 +4711,8 @@ static int check_abs_references(struct objtool_file *file) for_each_reloc(sec->rsec, reloc) { if (arch_absolute_reloc(file->elf, reloc)) { - WARN("section %s has absolute relocation at offset 0x%lx", - sec->name, reloc_offset(reloc)); + WARN("section %s has absolute relocation at offset 0x%llx", + sec->name, (unsigned long long)reloc_offset(reloc)); ret++; } } diff --git a/tools/testing/selftests/net/bareudp.sh b/tools/testing/selftests/net/bareudp.sh index 4046131e7888..d9e5b967f815 100755 --- a/tools/testing/selftests/net/bareudp.sh +++ b/tools/testing/selftests/net/bareudp.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # SPDX-License-Identifier: GPL-2.0 # Test various bareudp tunnel configurations.
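A detail worth spelling out in the ynl-priv.h fix above: a netlink string attribute's nla_len must count the terminating NUL, while NLA_ALIGN() padding belongs only to the message-length accounting, never inside nla_len itself. A standalone re-derivation of the arithmetic (plain userspace C, constants inlined):

#include <stdio.h>
#include <string.h>

#define NLA_HDRLEN	4			/* struct nlattr: u16 len + u16 type */
#define NLA_ALIGN(len)	(((len) + 3U) & ~3U)

int main(void)
{
	const char *str = "eth0";
	unsigned int payload = strlen(str) + 1;		/* 5: include the NUL */
	unsigned int nla_len = NLA_HDRLEN + payload;	/* 9, no padding inside */

	/* old, buggy accounting: NUL dropped, padding folded into nla_len */
	unsigned int buggy = NLA_HDRLEN + NLA_ALIGN(strlen(str));	/* 8 */

	printf("fixed nla_len=%u (message cursor advances %u), buggy nla_len=%u\n",
	       nla_len, NLA_ALIGN(nla_len), buggy);
	return 0;
}

For "eth0" the fixed code yields nla_len 9 with the message cursor advancing 12 bytes, whereas the old code declared a 4-byte payload and then strcpy()'d 5 bytes into it.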